glusterfs
401 lines · 12.6 KB
1#!/usr/bin/python3
2
3from __future__ import print_function4
5import os6from errno import EEXIST, ENOENT7
8from gluster.cliutils import (execute, Cmd, node_output_ok,9node_output_notok, execute_in_peers,10runcli, oknotok)11from prettytable import PrettyTable12
13LOG_DIR = "@localstatedir@/log/glusterfs/geo-replication-secondaries"14CLI_LOG = "@localstatedir@/log/glusterfs/cli.log"15GEOREP_DIR = "@GLUSTERD_WORKDIR@/geo-replication"16GLUSTERD_VOLFILE = "@GLUSTERD_VOLFILE@"17
18
class MountbrokerUserMgmt(object):
    """Parse and rewrite the mountbroker-related options of glusterd.vol.

    The volfile is split into three buckets on parse:
      - ``_options``: every ``option <key> <value>`` line
      - ``commented_lines``: every line starting with ``#`` (preserved on save)
      - ``user_volumes``: user -> set of volumes, extracted from the
        ``mountbroker-geo-replication.<user>`` options

    ``save()`` serializes the buckets back and replaces the volfile
    atomically (write temp file, fsync, rename).
    """

    def __init__(self, volfile):
        self.volfile = volfile
        self._options = {}
        self.commented_lines = []
        self.user_volumes = {}
        self._parse()

    def _parse(self):
        """ Example glusterd.vol
        volume management
            type mgmt/glusterd
            option working-directory /var/lib/glusterd
            option transport-type socket,rdma
            option transport.socket.keepalive-time 10
            option transport.socket.keepalive-interval 2
            option transport.socket.read-fail-log off
            option rpc-auth-allow-insecure on
            option ping-timeout 0
            option event-threads 1
            # option base-port 49152
            option mountbroker-root /var/mountbroker-root
            option mountbroker-geo-replication.user1 vol1,vol2,vol3
            option geo-replication-log-group geogroup
            option rpc-auth-allow-insecure on
        end-volume
        """
        with open(self.volfile, "r") as f:
            for line in f:
                line = line.strip()
                if line.startswith("option "):
                    # Expects exactly "option <key> <value>"; values with
                    # spaces do not occur in glusterd.vol options.
                    key, value = line.split()[1:]
                    self._options[key] = value
                if line.startswith("#"):
                    # Keep commented lines so save() does not drop them
                    # (their original position is not preserved).
                    self.commented_lines.append(line)

        # Split per-user options into user -> set(volumes)
        for k, v in self._options.items():
            if k.startswith("mountbroker-geo-replication."):
                user = k.split(".")[-1]
                self.user_volumes[user] = set(v.split(","))

    def get_group(self):
        """Return the geo-replication log group name, or None if unset."""
        return self._options.get("geo-replication-log-group", None)

    def _get_write_data(self):
        """Serialize parsed state back into glusterd.vol syntax."""
        op = "volume management\n"
        op += "    type mgmt/glusterd\n"
        for k, v in self._options.items():
            if k.startswith("mountbroker-geo-replication."):
                # Users will be added separately below from user_volumes,
                # which reflects add()/remove() calls made after parse.
                continue

            op += "    option %s %s\n" % (k, v)

        for k, v in self.user_volumes.items():
            # Users with no volumes left are dropped from the volfile.
            if v:
                # Sort the volume list: sets are unordered, and a stable
                # ordering avoids rewriting the volfile differently on
                # every save.
                op += ("    option mountbroker-geo-replication."
                       "%s %s\n" % (k, ",".join(sorted(v))))

        for line in self.commented_lines:
            op += "    %s\n" % line

        op += "end-volume"
        return op

    def save(self):
        """Atomically replace the volfile with the serialized state."""
        with open(self.volfile + "_tmp", "w") as f:
            f.write(self._get_write_data())
            f.flush()
            # fsync before rename so the new content is durable before it
            # replaces the old file.
            os.fsync(f.fileno())
        os.rename(self.volfile + "_tmp", self.volfile)

    def set_mount_root_and_group(self, mnt_root, group):
        """Set the mountbroker root directory and log group options."""
        self._options["mountbroker-root"] = mnt_root
        self._options["geo-replication-log-group"] = group

    def add(self, volume, user):
        """Allow *user* to mount *volume*. Idempotent; creates the user
        entry on first use."""
        self.user_volumes.setdefault(user, set()).add(volume)

    def remove(self, volume=None, user=None):
        """Remove mountbroker access entries.

        - user only: clear all volumes of that user
        - user and volume: remove that one volume from that user
        - volume only: remove the volume from every user
        - neither: no-op

        Missing users/volumes are silently ignored.
        """
        if user is not None:
            if volume is None:
                self.user_volumes[user] = set()
            else:
                self.user_volumes.get(user, set()).discard(volume)
        elif volume is not None:
            for volumes in self.user_volumes.values():
                volumes.discard(volume)

    def info(self):
        """Return a JSON-serializable summary of the mountbroker state.

        Volume sets are converted to lists so the result can be sent
        over the peers RPC as JSON.
        """
        return {
            "mountbroker-root": self._options.get("mountbroker-root", "None"),
            "geo-replication-log-group": self._options.get(
                "geo-replication-log-group", ""),
            "users": {k: list(v) for k, v in self.user_volumes.items()}
        }
142
class NodeSetup(Cmd):
    # Per-node setup steps performed by run():
    # Test if group exists using `getent group <grp>`
    # and then group add using `groupadd <grp>`
    # chgrp -R <grp> /var/log/glusterfs/geo-replication-secondaries
    # chgrp -R <grp> /var/lib/glusterd/geo-replication
    # chmod -R 770 /var/log/glusterfs/geo-replication-secondaries
    # chmod 770 /var/lib/glusterd/geo-replication
    # mkdir -p <mnt_root>
    # chmod 0711 <mnt_root>
    # If selinux,
    # semanage fcontext -a -e /home /var/mountbroker-root
    # restorecon -Rv /var/mountbroker-root
    name = "node-setup"

    def args(self, parser):
        """Register positional arguments: mount root path and group name."""
        parser.add_argument("mount_root")
        parser.add_argument("group")

    def run(self, args):
        """Prepare this node for mountbroker use and update glusterd.vol.

        Creates the mount root, adjusts SELinux context (best-effort),
        fixes group ownership/permissions of geo-replication dirs and
        logs, and records mount root + group in the volfile.
        """
        m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)

        try:
            os.makedirs(args.mount_root)
        except OSError as e:
            # Already existing mount root is fine; anything else is fatal.
            if e.errno == EEXIST:
                pass
            else:
                node_output_notok("Unable to Create {0}".format(
                    args.mount_root))

        execute(["chmod", "0711", args.mount_root])
        try:
            # Best-effort SELinux labeling; ENOENT means semanage is not
            # installed (non-SELinux system), which is acceptable.
            execute(["semanage", "fcontext", "-a", "-e",
                     "/home", args.mount_root])
        except OSError as e:
            if e.errno == ENOENT:
                pass
            else:
                node_output_notok(
                    "Unable to run semanage: {0}".format(e))

        try:
            # Apply the new context; again tolerate a missing binary.
            execute(["restorecon", "-Rv", args.mount_root])
        except OSError as e:
            if e.errno == ENOENT:
                pass
            else:
                node_output_notok(
                    "Unable to run restorecon: {0}".format(e))

        # The group must already exist on this node; this tool does not
        # create it.
        rc, out, err = execute(["getent", "group", args.group])
        if rc != 0:
            node_output_notok("User Group not exists")

        execute(["chgrp", "-R", args.group, GEOREP_DIR])
        execute(["chgrp", "-R", args.group, LOG_DIR])
        execute(["chgrp", args.group, CLI_LOG])
        execute(["chmod", "770", GEOREP_DIR])
        # Directories need the execute bit (770); plain files do not (660).
        execute(["find", LOG_DIR, "-type", "d", "-exec", "chmod", "770", "{}",
                 "+"])
        execute(["find", LOG_DIR, "-type", "f", "-exec", "chmod", "660", "{}",
                 "+"])
        execute(["chmod", "660", CLI_LOG])

        m.set_mount_root_and_group(args.mount_root, args.group)
        m.save()

        # NOTE(review): node_output_ok/node_output_notok come from
        # gluster.cliutils; notok presumably terminates this node's run —
        # confirm, since the code relies on not falling through after it.
        node_output_ok()
212
213def color_status(value):214if value.lower() in ("up", "ok", "yes"):215return "green"216else:217return "red"218
219
class CliSetup(Cmd):
    # gluster-mountbroker setup <MOUNT_ROOT> <GROUP>
    name = "setup"

    def args(self, parser):
        """Register positional arguments for the setup subcommand."""
        parser.add_argument("mount_root",
                            help="Path to the mountbroker-root directory.")
        parser.add_argument("group",
                            help="Group to be used for setup.")

    def run(self, args):
        """Run node-setup on every peer and print a per-node result table."""
        peers = execute_in_peers("node-setup", [args.mount_root, args.group])
        table = PrettyTable(["NODE", "NODE STATUS", "SETUP STATUS"])
        table.align["NODE STATUS"] = "r"
        table.align["SETUP STATUS"] = "r"
        for peer in peers:
            node_state = "UP" if peer.node_up else "DOWN"
            if peer.ok:
                setup_state = "OK"
            else:
                setup_state = "NOT OK: {0}".format(peer.error)
            table.add_row([peer.hostname, node_state, setup_state])

        print(table)
244
class NodeStatus(Cmd):
    # Check if Group exists
    # Check if user exists
    # Check directory permission /var/log/glusterfs/geo-replication-secondaries
    # and /var/lib/glusterd/geo-replication
    # Check mount root and its permissions
    # Check glusterd.vol file for user, group, dir existence
    name = "node-status"

    def run(self, args):
        """Collect local mountbroker configuration facts and report them."""
        mgmt = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
        data = mgmt.info()

        rc, _, _ = execute(["getent", "group",
                            data["geo-replication-log-group"]])
        # Does the configured log group exist on this node?
        data["group_exists"] = (rc == 0)
        # Does the configured mountbroker root path exist?
        data["path_exists"] = os.path.exists(data["mountbroker-root"])

        node_output_ok(data)
271
class CliStatus(Cmd):
    # gluster-mountbroker status
    name = "status"

    def run(self, args):
        """Gather node-status from all peers and render a summary table."""
        peers = execute_in_peers("node-status")
        table = PrettyTable(["NODE", "NODE STATUS", "MOUNT ROOT",
                             "GROUP", "USERS"])
        table.align["NODE STATUS"] = "r"

        for peer in peers:
            node_data = peer.output
            # Down/unreachable peers report an empty or "N/A" payload.
            if node_data in ("", "N/A"):
                node_data = {}

            # "user(vol1, vol2) " per user, or "None" when empty.
            users_row_data = "".join(
                "{0}({1}) ".format(user, ", ".join(vols))
                for user, vols in node_data.get("users", {}).items())
            if not users_row_data:
                users_row_data = "None"

            # Annotate mount root / group with OK / NOT OK existence checks.
            mount_root = node_data.get("mountbroker-root", "None")
            if mount_root != "None":
                mount_root += "({0})".format(oknotok(
                    node_data.get("path_exists", False)))

            grp = node_data.get("geo-replication-log-group", "None")
            if grp != "None":
                grp += "({0})".format(oknotok(
                    node_data.get("group_exists", False)))

            table.add_row([peer.hostname,
                           "UP" if peer.node_up else "DOWN",
                           mount_root,
                           grp,
                           users_row_data])

        print(table)
312
class NodeAdd(Cmd):
    # useradd -m -g <grp> <usr>
    # useradd to glusterd.vol
    name = "node-add"

    def args(self, parser):
        """Positional arguments: volume, then user."""
        parser.add_argument("volume")
        parser.add_argument("user")

    def run(self, args):
        """Record volume access for the user in glusterd.vol and persist."""
        mgmt = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
        # Setup must have configured the log group first.
        if mgmt.get_group() is None:
            node_output_notok("Group is not available")

        mgmt.add(args.volume, args.user)
        mgmt.save()
        node_output_ok()
332
class CliAdd(Cmd):
    # gluster-mountbroker add <VOLUME> <USER>
    name = "add"

    def args(self, parser):
        """Register positional arguments for the add subcommand."""
        parser.add_argument("volume",
                            help="Volume to be added.")
        parser.add_argument("user",
                            help="User for which volume is to be added.")

    def run(self, args):
        """Run node-add on every peer and print a per-node result table."""
        peers = execute_in_peers("node-add", [args.volume, args.user])
        table = PrettyTable(["NODE", "NODE STATUS", "ADD STATUS"])
        table.align["NODE STATUS"] = "r"
        table.align["ADD STATUS"] = "r"

        for peer in peers:
            add_state = ("OK" if peer.ok
                         else "NOT OK: {0}".format(peer.error))
            table.add_row([peer.hostname,
                           "UP" if peer.node_up else "DOWN",
                           add_state])

        print(table)
358
class NodeRemove(Cmd):
    # userremove from glusterd.vol file
    name = "node-remove"

    def args(self, parser):
        """Positional arguments: volume, then user ("." acts as wildcard)."""
        parser.add_argument("volume")
        parser.add_argument("user")

    def run(self, args):
        """Remove user/volume entries from glusterd.vol and persist."""
        mgmt = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
        # "." is the CLI placeholder for "not specified" -> None.
        vol_arg = args.volume if args.volume != "." else None
        user_arg = args.user if args.user != "." else None
        mgmt.remove(volume=vol_arg, user=user_arg)
        mgmt.save()
        node_output_ok()
375
class CliRemove(Cmd):
    # gluster-mountbroker remove --volume <VOLUME> --user <USER>
    name = "remove"

    def args(self, parser):
        """Register optional arguments; "." defaults mean "all/unspecified"."""
        parser.add_argument("--volume", default=".",
                            help="Volume to be removed.")
        parser.add_argument("--user", default=".",
                            help="User for which volume has to be removed.")

    def run(self, args):
        """Run node-remove on every peer and print a per-node result table."""
        peers = execute_in_peers("node-remove", [args.volume, args.user])
        table = PrettyTable(["NODE", "NODE STATUS", "REMOVE STATUS"])
        table.align["NODE STATUS"] = "r"
        table.align["REMOVE STATUS"] = "r"

        for peer in peers:
            remove_state = ("OK" if peer.ok
                            else "NOT OK: {0}".format(peer.error))
            table.add_row([peer.hostname,
                           "UP" if peer.node_up else "DOWN",
                           remove_state])

        print(table)
400if __name__ == "__main__":401runcli()402