/*
Copyright (c) 2007-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.

This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
#include <stdio.h>
#include <stdlib.h>

#include <glusterfs/statedump.h>
#include <glusterfs/syscall.h>
#include <glusterfs/monitoring.h>
#include "glusterd1-xdr.h"
#include "rpc-clnt.h"
#include "glusterfsd-messages.h"
#include "glusterfs3.h"
#include "portmap-xdr.h"
#include "glusterfsd.h"
#include "cli1-xdr.h"
#include "server.h"

static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;

static gf_boolean_t need_emancipate = _gf_false;

static int
mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
{
    glusterfs_ctx_t *ctx = NULL;

    ctx = glusterfsd_ctx;
    gf_log("mgmt", GF_LOG_INFO, "Volume file changed");

    glusterfs_volfile_fetch(ctx);
    return 0;
}

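/* Handle a volfile delivered for a multiplexed daemon: compare its SHA256
 * checksum against the entry tracked in ctx->volfile_list, skip the update
 * when nothing changed, attach it as a new service volfile when the id is
 * unknown, and otherwise reconfigure the already-loaded graph. The volfile
 * is staged through an unlinked temporary file so it disappears on close. */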
static int
mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
    dict_t *dict)
{
    glusterfs_ctx_t *ctx = NULL;
    int ret = 0;
    FILE *tmpfp = NULL;
    gf_volfile_t *volfile_obj = NULL;
    gf_volfile_t *volfile_tmp = NULL;
    char sha256_hash[SHA256_DIGEST_LENGTH] = {
        0,
    };
    int tmp_fd = -1;
    char template[] = "/tmp/glfs.volfile.XXXXXX";

    glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash);
    ctx = THIS->ctx;
    LOCK(&ctx->volfile_lock);
    {
        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
        {
            if (!strcmp(volfile_id, volfile_obj->vol_id)) {
                if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
                        sizeof(volfile_obj->volfile_checksum))) {
                    UNLOCK(&ctx->volfile_lock);
                    gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
                        NULL);
                    goto out;
                }
                volfile_tmp = volfile_obj;
                break;
            }
        }

        /* coverity[secure_temp] mkstemp uses 0600 as the mode */
        tmp_fd = mkstemp(template);
        if (-1 == tmp_fd) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
                "create template=%s", template, NULL);
            ret = -1;
            goto out;
        }

        /* Calling unlink so that when the file is closed or program
         * terminates the temporary file is deleted.
         */
        ret = sys_unlink(template);
        if (ret < 0) {
            gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
                "delete template=%s", template, NULL);
            ret = 0;
        }

        tmpfp = fdopen(tmp_fd, "w+b");
        if (!tmpfp) {
            ret = -1;
            goto unlock;
        }

        fwrite(volfile, size, 1, tmpfp);
        fflush(tmpfp);
        if (ferror(tmpfp)) {
            ret = -1;
            goto unlock;
        }

        if (!volfile_tmp) {
            /* There is no checksum in the list, which means simply attach
             * the volfile
             */
            ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
                sha256_hash, dict);
            goto unlock;
        }
        ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
            sha256_hash, dict);
        if (ret < 0) {
            gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
        }
    }
unlock:
    UNLOCK(&ctx->volfile_lock);
out:
    if (tmpfp)
        fclose(tmpfp);
    else if (tmp_fd != -1)
        sys_close(tmp_fd);
    return ret;
}

static int
mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data)
{
    return 0;
}

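/* Encode the reply structure in 'arg' into an iobuf using the given XDR
 * routine; 'outmsg' is pointed at the encoded bytes. Returns the iobuf on
 * success (caller owns the reference) or NULL on allocation/encoding
 * failure. */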
static struct iobuf *
glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
    struct iovec *outmsg, xdrproc_t xdrproc)
{
    struct iobuf *iob = NULL;
    ssize_t retlen = -1;
    ssize_t xdr_size = 0;

    /* First, get the io buffer into which the reply in arg will
     * be serialized.
     */
    xdr_size = xdr_sizeof(xdrproc, arg);
    iob = iobuf_get2(req->svc->ctx->iobuf_pool, xdr_size);
    if (!iob) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to get iobuf");
        goto ret;
    }

    iobuf_to_iovec(iob, outmsg);
    /* Use the given serializer to translate the given C structure in arg
     * to XDR format which will be written into the buffer in outmsg.
     */
    /* retlen is used to receive the error since size_t is unsigned and we
     * need -1 for error notification during encoding.
     */
    retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
    if (retlen == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
        GF_FREE(iob);
        goto ret;
    }

    outmsg->iov_len = retlen;
ret:
    if (retlen == -1) {
        iob = NULL;
    }

    return iob;
}

static int
glusterfs_submit_reply(rpcsvc_request_t *req, void *arg, struct iovec *payload,
    int payloadcount, struct iobref *iobref,
    xdrproc_t xdrproc)
{
    struct iobuf *iob = NULL;
    int ret = -1;
    struct iovec rsp = {
        0,
    };
    char new_iobref = 0;

    if (!req) {
        GF_ASSERT(req);
        goto out;
    }

    if (!iobref) {
        iobref = iobref_new();
        if (!iobref) {
            gf_log(THIS->name, GF_LOG_ERROR, "out of memory");
            goto out;
        }

        new_iobref = 1;
    }

    iob = glusterfs_serialize_reply(req, arg, &rsp, xdrproc);
    if (!iob) {
        gf_log_callingfn(THIS->name, GF_LOG_ERROR, "Failed to serialize reply");
    } else {
        iobref_add(iobref, iob);
    }

    ret = rpcsvc_submit_generic(req, &rsp, 1, payload, payloadcount, iobref);

    if (ret == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Reply submission failed");
        goto out;
    }

    ret = 0;
out:
    if (iob)
        iobuf_unref(iob);

    if (new_iobref && iobref)
        iobref_unref(iobref);

    return ret;
}

static int
glusterfs_terminate_response_send(rpcsvc_request_t *req, int op_ret)
{
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    dict_t *dict = NULL;
    int ret = 0;

    rsp.op_ret = op_ret;
    rsp.op_errno = 0;
    rsp.op_errstr = "";
    dict = dict_new();

    if (dict)
        ret = dict_allocate_and_serialize(dict, &rsp.output.output_val,
            &rsp.output.output_len);

    if (ret == 0)
        ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
            (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);

    GF_FREE(rsp.output.output_val);
    if (dict)
        dict_unref(dict);
    return ret;
}

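/* GLUSTERD_BRICK_TERMINATE handler: locate the named brick in the active
 * graph, acknowledge the request, and either terminate the whole process
 * (when the last brick is going away and graceful cleanup is not requested)
 * or sign out of the portmapper and send GF_EVENT_CLEANUP so only that brick
 * is detached. */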
static int
glusterfs_handle_terminate(rpcsvc_request_t *req)
{
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    ssize_t ret;
    glusterfs_ctx_t *ctx = NULL;
    xlator_t *top = NULL;
    xlator_t *victim = NULL;
    xlator_t *tvictim = NULL;
    xlator_list_t **trav_p = NULL;
    gf_boolean_t lockflag = _gf_false;
    gf_boolean_t still_bricks_attached = _gf_false;
    dict_t *dict = NULL;
    xlator_t *this = NULL;
    char *value = NULL;
    gf_boolean_t graceful_cleanup = _gf_false;

    this = THIS;
    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ctx = glusterfsd_ctx;

    dict = dict_new();
    if (!dict) {
        return -1;
    }
    if (xlator_req.dict.dict_len) {
        ret = dict_unserialize(xlator_req.dict.dict_val,
            xlator_req.dict.dict_len, &dict);
        if (ret < 0) {
            gf_log(this->name, GF_LOG_ERROR,
                "Failed to unserialize "
                "req-buffer to dictionary");
            goto err;
        }
    }

    ret = dict_get_str(dict, GLUSTER_BRICK_GRACEFUL_CLEANUP, &value);
    if (!ret) {
        ret = gf_string2boolean(value, &graceful_cleanup);
        if (ret)
            graceful_cleanup = _gf_false;
    }

    LOCK(&ctx->volfile_lock);
    {
        /* Find the xlator_list_t that points to our victim. */
        if (glusterfsd_ctx->active) {
            top = glusterfsd_ctx->active->first;
            for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
                victim = (*trav_p)->xlator;
                if (!victim->cleanup_starting &&
                    strcmp(victim->name, xlator_req.name) == 0) {
                    break;
                }
            }
        }

        if (!top)
            goto err;
    }
    if (!*trav_p) {
        gf_log(this->name, GF_LOG_ERROR, "can't terminate %s - not found",
            xlator_req.name);
        /*
         * Used to be -ENOENT. However, the caller asked us to
         * make sure it's down and if it's already down that's
         * good enough.
         */
        glusterfs_terminate_response_send(req, 0);
        goto err;
    }

    glusterfs_terminate_response_send(req, 0);
    for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
        tvictim = (*trav_p)->xlator;
        if (!tvictim->cleanup_starting &&
            !strcmp(tvictim->name, xlator_req.name)) {
            continue;
        }
        if (!tvictim->cleanup_starting) {
            still_bricks_attached = _gf_true;
            break;
        }
    }
    /* Cleanup brick resource gracefully if enabled is true */
    if (!still_bricks_attached && !graceful_cleanup) {
        gf_log(this->name, GF_LOG_INFO,
            "terminating after loss of last child %s", xlator_req.name);
        rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name);
        kill(getpid(), SIGTERM);
    } else {
        /* Check if detach brick is a last brick */
        if (!still_bricks_attached && graceful_cleanup)
            ctx->cleanup_starting = 1;
        /* TODO cleanup sequence needs to be done properly for
           Quota and Changelog
         */
        if (victim->cleanup_starting)
            goto err;

        rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name);
        victim->cleanup_starting = 1;

        UNLOCK(&ctx->volfile_lock);
        lockflag = _gf_true;

        gf_log(this->name, GF_LOG_INFO,
            "detaching not-only child %s "
            " graceful_cleanup %d",
            xlator_req.name, graceful_cleanup);
        top->notify(top, GF_EVENT_CLEANUP, victim);
    }
err:
    if (!lockflag)
        UNLOCK(&ctx->volfile_lock);
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (xlator_req.input.input_val)
        free(xlator_req.input.input_val);
    if (dict)
        dict_unref(dict);
    free(xlator_req.name);
    xlator_req.name = NULL;
    return 0;
}

static int
glusterfs_translator_info_response_send(rpcsvc_request_t *req, int ret,
    char *msg, dict_t *output)
{
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    gf_boolean_t free_ptr = _gf_false;
    GF_ASSERT(req);

    rsp.op_ret = ret;
    rsp.op_errno = 0;
    if (ret && msg && msg[0])
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    ret = -1;
    if (output) {
        ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
            &rsp.output.output_len);
    }
    if (!ret)
        free_ptr = _gf_true;

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;
    if (free_ptr)
        GF_FREE(rsp.output.output_val);
    return ret;
}

static int
glusterfs_xlator_op_response_send(rpcsvc_request_t *req, int op_ret, char *msg,
    dict_t *output)
{
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    int ret = -1;
    gf_boolean_t free_ptr = _gf_false;
    GF_ASSERT(req);

    rsp.op_ret = op_ret;
    rsp.op_errno = 0;
    if (op_ret && msg && msg[0])
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    if (output) {
        ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
            &rsp.output.output_len);
    }
    if (!ret)
        free_ptr = _gf_true;

    ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);

    if (free_ptr)
        GF_FREE(rsp.output.output_val);

    return ret;
}

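/* Back-end for "volume top read-perf/write-perf": write blk-cnt blocks of
 * blk-size bytes to a scratch file under the brick (and, for the read test,
 * read them back), then store the measured time and throughput in the
 * request dictionary. */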
static int
glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
    gf_boolean_t write_test)
{
    int32_t fd = -1;
    int32_t output_fd = -1;
    char export_path[PATH_MAX] = {
        0,
    };
    char *buf = NULL;
    int32_t iter = 0;
    int32_t ret = -1;
    uint64_t total_blks = 0;
    uint32_t blk_size;
    uint32_t blk_count;
    double throughput = 0;
    double time = 0;
    struct timeval begin, end = {
        0,
    };

    GF_ASSERT(brick_path);

    ret = dict_get_uint32(dict, "blk-size", &blk_size);
    if (ret)
        goto out;
    ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
    if (ret)
        goto out;

    if (!(blk_size > 0) || !(blk_count > 0))
        goto out;

    buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char);
    if (!buf) {
        ret = -1;
        gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
        goto out;
    }

    snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
        ".gf-tmp-stats-perf");
    fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
    if (-1 == fd) {
        ret = -1;
        gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
        goto out;
    }

    gettimeofday(&begin, NULL);
    for (iter = 0; iter < blk_count; iter++) {
        ret = sys_write(fd, buf, blk_size);
        if (ret != blk_size) {
            ret = -1;
            goto out;
        }
        total_blks += ret;
    }
    gettimeofday(&end, NULL);
    if (total_blks != ((uint64_t)blk_size * blk_count)) {
        gf_log("glusterd", GF_LOG_WARNING, "Error in write");
        ret = -1;
        goto out;
    }

    time = gf_tvdiff(&begin, &end);
    throughput = total_blks / time;
    gf_log("glusterd", GF_LOG_INFO,
        "Throughput %.2f Mbps time %.2f secs "
        "bytes written %" PRId64,
        throughput, time, total_blks);

    /* if it's a write test, we are done. Otherwise, we continue to the read
     * part */
    if (write_test == _gf_true) {
        ret = 0;
        goto out;
    }

    ret = sys_fsync(fd);
    if (ret) {
        gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
        goto out;
    }
    ret = sys_lseek(fd, 0L, 0);
    if (ret != 0) {
        gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
        ret = -1;
        goto out;
    }

    output_fd = open("/dev/null", O_RDWR);
    if (-1 == output_fd) {
        ret = -1;
        gf_log("glusterd", GF_LOG_ERROR, "Could not open output file");
        goto out;
    }

    total_blks = 0;

    gettimeofday(&begin, NULL);
    for (iter = 0; iter < blk_count; iter++) {
        ret = sys_read(fd, buf, blk_size);
        if (ret != blk_size) {
            ret = -1;
            goto out;
        }
        ret = sys_write(output_fd, buf, blk_size);
        if (ret != blk_size) {
            ret = -1;
            goto out;
        }
        total_blks += ret;
    }
    gettimeofday(&end, NULL);
    if (total_blks != ((uint64_t)blk_size * blk_count)) {
        ret = -1;
        gf_log("glusterd", GF_LOG_WARNING, "Error in read");
        goto out;
    }

    time = gf_tvdiff(&begin, &end);
    throughput = total_blks / time;
    gf_log("glusterd", GF_LOG_INFO,
        "Throughput %.2f Mbps time %.2f secs "
        "bytes read %" PRId64,
        throughput, time, total_blks);
    ret = 0;
out:
    if (fd >= 0)
        sys_close(fd);
    if (output_fd >= 0)
        sys_close(output_fd);
    GF_FREE(buf);
    sys_unlink(export_path);
    if (ret == 0) {
        ret = dict_set_double(dict, "time", time);
        if (ret)
            goto end;
        ret = dict_set_double(dict, "throughput", throughput);
        if (ret)
            goto end;
    }
end:
    return ret;
}

static int
glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *dict = NULL;
    xlator_t *this = NULL;
    gf1_cli_top_op top_op = 0;
    xlator_t *any = NULL;
    xlator_t *xlator = NULL;
    glusterfs_graph_t *active = NULL;
    glusterfs_ctx_t *ctx = NULL;
    char msg[2048] = {
        0,
    };
    dict_t *output = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    ret = dict_unserialize(xlator_req.input.input_val,
        xlator_req.input.input_len, &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
            "failed to "
            "unserialize req-buffer to dictionary");
        goto out;
    }

    ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
    if (ret)
        goto cont;
    if (GF_CLI_TOP_READ_PERF == top_op) {
        ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false);
    } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
        ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true);
    }

cont:
    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);
    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    any = active->first;

    xlator = get_xlator_by_name(any, xlator_req.name);
    if (!xlator) {
        ret = -1;
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
        goto out;
    }

    if (strcmp(xlator->type, "debug/io-stats")) {
        xlator = get_xlator_by_type(xlator, "debug/io-stats");
        if (!xlator) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                "xlator-type debug/io-stats is not loaded");
            goto out;
        }
    }

    output = dict_new();
    ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_INFO, dict, output);

out:
    ret = glusterfs_translator_info_response_send(req, ret, msg, output);

    free(xlator_req.name);
    free(xlator_req.input.input_val);
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    if (dict)
        dict_unref(dict);
    return ret;
}

static int
glusterfs_handle_translator_op(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    int32_t op_ret = 0;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *input = NULL;
    xlator_t *xlator = NULL;
    xlator_t *any = NULL;
    dict_t *output = NULL;
    char key[32] = {0};
    int len;
    char *xname = NULL;
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;
    int i = 0;
    int count = 0;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    ctx = glusterfsd_ctx;
    active = ctx->active;
    if (!active) {
        ret = -1;
        gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
            "brick-op_no.=%d", xlator_req.op, NULL);
        goto out;
    }
    any = active->first;
    input = dict_new();
    ret = dict_unserialize(xlator_req.input.input_val,
        xlator_req.input.input_len, &input);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
            "failed to "
            "unserialize req-buffer to dictionary");
        goto out;
    } else {
        input->extra_stdfree = xlator_req.input.input_val;
    }

    ret = dict_get_int32(input, "count", &count);

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    for (i = 0; i < count; i++) {
        len = snprintf(key, sizeof(key), "xl-%d", i);
        ret = dict_get_strn(input, key, len, &xname);
        if (ret) {
            gf_log(this->name, GF_LOG_ERROR,
                "Couldn't get "
                "xlator %s ",
                key);
            goto out;
        }
        xlator = xlator_search_by_name(any, xname);
        if (!xlator) {
            gf_log(this->name, GF_LOG_ERROR,
                "xlator %s is not "
                "loaded",
                xname);
            goto out;
        }
    }
    for (i = 0; i < count; i++) {
        len = snprintf(key, sizeof(key), "xl-%d", i);
        ret = dict_get_strn(input, key, len, &xname);
        xlator = xlator_search_by_name(any, xname);
        XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
        /* If notify fails for an xlator we need to capture it but
         * continue with the loop. */
        if (ret)
            op_ret = -1;
    }
    ret = op_ret;
out:
    glusterfs_xlator_op_response_send(req, ret, "", output);
    if (input)
        dict_unref(input);
    if (output)
        dict_unref(output);
    free(xlator_req.name);  // malloced by xdr

    return 0;
}

static int
glusterfs_handle_bitrot(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *input = NULL;
    dict_t *output = NULL;
    xlator_t *any = NULL;
    xlator_t *this = NULL;
    xlator_t *xlator = NULL;
    char msg[2048] = {
        0,
    };
    char xname[1024] = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    char *scrub_opt = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (!active) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    any = active->first;

    input = dict_new();
    if (!input)
        goto out;

    ret = dict_unserialize(xlator_req.input.input_val,
        xlator_req.input.input_len, &input);

    if (ret < 0) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL);
        goto out;
    }

    /* Send scrubber request to bitrot xlator */
    snprintf(xname, sizeof(xname), "%s-bit-rot-0", xlator_req.name);
    xlator = xlator_search_by_name(any, xname);
    if (!xlator) {
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
        gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    ret = dict_get_str(input, "scrub-value", &scrub_opt);
    if (ret) {
        snprintf(msg, sizeof(msg), "Failed to get scrub value");
        gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL);
        ret = -1;
        goto out;
    }

    if (!strncmp(scrub_opt, "status", SLEN("status"))) {
        ret = xlator->notify(xlator, GF_EVENT_SCRUB_STATUS, input, output);
    } else if (!strncmp(scrub_opt, "ondemand", SLEN("ondemand"))) {
        ret = xlator->notify(xlator, GF_EVENT_SCRUB_ONDEMAND, input, output);
        if (ret == -2) {
            snprintf(msg, sizeof(msg),
                "Scrubber is in "
                "Pause/Inactive/Running state");
            ret = -1;
            goto out;
        }
    }
out:
    glusterfs_translator_info_response_send(req, ret, msg, output);

    if (input)
        dict_unref(input);
    free(xlator_req.input.input_val); /*malloced by xdr*/
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    free(xlator_req.name);

    return 0;
}

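/* GLUSTERD_BRICK_ATTACH handler used for brick multiplexing: attach the
 * volfile named in the request to the active graph, send GF_EVENT_PARENT_UP
 * to the new child, and scale up the protocol/server RPC threads by one. */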
static int
glusterfs_handle_attach(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    xlator_t *this = NULL;
    xlator_t *nextchild = NULL;
    glusterfs_graph_t *newgraph = NULL;
    glusterfs_ctx_t *ctx = NULL;
    xlator_t *srv_xl = NULL;
    server_conf_t *srv_conf = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ctx = this->ctx;
    if (!ctx->cmd_args.volfile_id) {
        gf_log(THIS->name, GF_LOG_ERROR,
            "No volfile-id provided, erroring out");
        return -1;
    }

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ret = 0;

    if (!this->ctx->active) {
        gf_log(this->name, GF_LOG_WARNING,
            "got attach for %s but no active graph", xlator_req.name);
        goto post_unlock;
    }

    gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);

    LOCK(&ctx->volfile_lock);
    {
        ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
            &newgraph);
        if (!ret && (newgraph && newgraph->first)) {
            nextchild = newgraph->first;
            ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
                    "event=ParentUp", "name=%s", nextchild->name, NULL);
                goto unlock;
            }
            /* we need a protocol/server xlator as
             * nextchild
             */
            srv_xl = this->ctx->active->first;
            srv_conf = (server_conf_t *)srv_xl->private;
            rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
        }
        if (ret) {
            ret = -1;
        }
        ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
        if (ret) {
            /* Response sent back to glusterd, req is already destroyed. So
             * resetting the ret to 0. Otherwise another response will be
             * sent from rpcsvc_check_and_reply_error, which will lead to
             * a double resource leak.
             */
            ret = 0;
        }
    unlock:
        UNLOCK(&ctx->volfile_lock);
    }
post_unlock:
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    free(xlator_req.input.input_val);
    free(xlator_req.name);

    return ret;
}

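/* GLUSTERD_SVC_ATTACH handler: log the incoming volfile-id, unserialize the
 * accompanying dict and hand the volfile payload to mgmt_process_volfile(),
 * which attaches or reconfigures the service volfile in this multiplexed
 * daemon. */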
static int
glusterfs_handle_svc_attach(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    xlator_t *this = NULL;
    dict_t *dict = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s",
        xlator_req.name, NULL);

    dict = dict_new();
    if (!dict) {
        ret = -1;
        errno = ENOMEM;
        goto out;
    }

    ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
        &dict);
    if (ret) {
        gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL);
        goto out;
    }
    dict->extra_stdfree = xlator_req.dict.dict_val;

    ret = 0;

    ret = mgmt_process_volfile(xlator_req.input.input_val,
        xlator_req.input.input_len, xlator_req.name,
        dict);
out:
    if (dict)
        dict_unref(dict);
    if (xlator_req.input.input_val)
        free(xlator_req.input.input_val);
    if (xlator_req.name)
        free(xlator_req.name);
    glusterfs_translator_info_response_send(req, ret, NULL, NULL);
    return 0;
}

static int
glusterfs_handle_svc_detach(rpcsvc_request_t *req)
{
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    ssize_t ret;
    gf_volfile_t *volfile_obj = NULL;
    glusterfs_ctx_t *ctx = NULL;
    gf_volfile_t *volfile_tmp = NULL;

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ctx = glusterfsd_ctx;

    LOCK(&ctx->volfile_lock);
    {
        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
        {
            if (!strcmp(xlator_req.name, volfile_obj->vol_id)) {
                volfile_tmp = volfile_obj;
                break;
            }
        }

        if (!volfile_tmp) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s",
                xlator_req.name, NULL);
            /*
             * Used to be -ENOENT. However, the caller asked us to
             * make sure it's down and if it's already down that's
             * good enough.
             */
            ret = 0;
            goto out;
        }
        /* coverity[ORDER_REVERSAL] */
        ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
        if (ret) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042,
                NULL);
            goto out;
        }
    }
    UNLOCK(&ctx->volfile_lock);
out:
    glusterfs_terminate_response_send(req, ret);
    free(xlator_req.name);
    xlator_req.name = NULL;

    return 0;
}

static int
glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    xlator_t *this = NULL;
    glusterfs_ctx_t *ctx = NULL;
    char *filepath = NULL;
    int fd = -1;
    struct stat statbuf = {
        0,
    };
    char *msg = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ret = -1;
    ctx = this->ctx;

    /* Infra for monitoring */
    filepath = gf_monitor_metrics(ctx);
    if (!filepath)
        goto out;

    fd = sys_open(filepath, O_RDONLY, 0);
    if (fd < 0)
        goto out;

    if (sys_fstat(fd, &statbuf) < 0)
        goto out;

    if (statbuf.st_size > GF_UNIT_MB) {
        gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
            "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL);
    }
    msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
    if (!msg)
        goto out;

    ret = sys_read(fd, msg, statbuf.st_size);
    if (ret < 0)
        goto out;

    /* Send all the data in errstr, instead of dictionary for now */
    glusterfs_translator_info_response_send(req, 0, msg, NULL);

    ret = 0;
out:
    if (fd >= 0)
        sys_close(fd);

    GF_FREE(msg);
    GF_FREE(filepath);
    if (xlator_req.input.input_val)
        free(xlator_req.input.input_val);
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);

    return ret;
}

static int
glusterfs_handle_defrag(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *dict = NULL;
    xlator_t *xlator = NULL;
    xlator_t *any = NULL;
    dict_t *output = NULL;
    char msg[2048] = {0};
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (!active) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    any = active->first;
    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }
    dict = dict_new();
    if (!dict)
        goto out;

    ret = dict_unserialize(xlator_req.input.input_val,
        xlator_req.input.input_len, &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
            "failed to "
            "unserialize req-buffer to dictionary");
        goto out;
    }
    xlator = xlator_search_by_name(any, xlator_req.name);
    if (!xlator) {
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    ret = xlator->notify(xlator, GF_EVENT_VOLUME_DEFRAG, dict, output);

    ret = glusterfs_translator_info_response_send(req, ret, msg, output);
out:
    if (dict)
        dict_unref(dict);
    free(xlator_req.input.input_val);  // malloced by xdr
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    free(xlator_req.name);  // malloced by xdr

    return ret;
}

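/* GLUSTERD_BRICK_STATUS handler: decode the cmd and brick-name from the
 * request dict, locate the brick xlator under the protocol/server xlator,
 * and fill the reply dict through the dumpops of the server/brick xlators
 * (mem, clients, inode, fd, callpool). */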
static int
glusterfs_handle_brick_status(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req brick_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;
    xlator_t *server_xl = NULL;
    xlator_t *brick_xl = NULL;
    dict_t *dict = NULL;
    dict_t *output = NULL;
    uint32_t cmd = 0;
    char *msg = NULL;
    char *brickname = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &brick_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len,
        &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
            "Failed to unserialize "
            "req-buffer to dictionary");
        goto out;
    }

    ret = dict_get_uint32(dict, "cmd", &cmd);
    if (ret) {
        gf_log(this->name, GF_LOG_ERROR, "Couldn't get status op");
        goto out;
    }

    ret = dict_get_str(dict, "brick-name", &brickname);
    if (ret) {
        gf_log(this->name, GF_LOG_ERROR,
            "Couldn't get brickname from"
            " dict");
        goto out;
    }

    ctx = glusterfsd_ctx;
    if (ctx == NULL) {
        gf_log(this->name, GF_LOG_ERROR, "ctx returned NULL");
        ret = -1;
        goto out;
    }
    if (ctx->active == NULL) {
        gf_log(this->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    active = ctx->active;
    if (ctx->active->first == NULL) {
        gf_log(this->name, GF_LOG_ERROR,
            "ctx->active->first "
            "returned NULL");
        ret = -1;
        goto out;
    }
    server_xl = active->first;

    brick_xl = get_xlator_by_name(server_xl, brickname);
    if (!brick_xl) {
        gf_log(this->name, GF_LOG_ERROR, "xlator is not loaded");
        ret = -1;
        goto out;
    }

    output = dict_new();
    switch (cmd & GF_CLI_STATUS_MASK) {
        case GF_CLI_STATUS_MEM:
            ret = 0;
            gf_proc_dump_mem_info_to_dict(output);
            gf_proc_dump_mempool_info_to_dict(ctx, output);
            break;

        case GF_CLI_STATUS_CLIENTS:
        case GF_CLI_STATUS_CLIENT_LIST:
            ret = server_xl->dumpops->priv_to_dict(server_xl, output,
                brickname);
            break;

        case GF_CLI_STATUS_INODE:
            ret = server_xl->dumpops->inode_to_dict(brick_xl, output);
            break;

        case GF_CLI_STATUS_FD:
            ret = server_xl->dumpops->fd_to_dict(brick_xl, output);
            break;

        case GF_CLI_STATUS_CALLPOOL:
            ret = 0;
            gf_proc_dump_pending_frames_to_dict(ctx->pool, output);
            break;

        default:
            ret = -1;
            msg = gf_strdup("Unknown status op");
            break;
    }
    rsp.op_ret = ret;
    rsp.op_errno = 0;
    if (ret && msg)
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
        &rsp.output.output_len);
    if (ret) {
        gf_log(this->name, GF_LOG_ERROR,
            "Failed to serialize output dict to rsp");
        goto out;
    }

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;

out:
    if (dict)
        dict_unref(dict);
    if (output)
        dict_unref(output);
    free(brick_req.input.input_val);
    if (brick_req.dict.dict_val)
        free(brick_req.dict.dict_val);
    free(brick_req.name);
    GF_FREE(msg);
    GF_FREE(rsp.output.output_val);

    return ret;
}

static int
glusterfs_handle_node_status(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req node_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *any = NULL;
    xlator_t *node = NULL;
    xlator_t *subvol = NULL;
    dict_t *dict = NULL;
    dict_t *output = NULL;
    char *volname = NULL;
    char *node_name = NULL;
    char *subvol_name = NULL;
    uint32_t cmd = 0;
    char *msg = NULL;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &node_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    if (!dict) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to allocate the dictionary");
        goto out;
    }

    ret = dict_unserialize(node_req.input.input_val, node_req.input.input_len,
        &dict);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR,
            "Failed to unserialize "
            "req buffer to dictionary");
        goto out;
    }

    ret = dict_get_uint32(dict, "cmd", &cmd);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get status op");
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname");
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);
    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    any = active->first;

    if ((cmd & GF_CLI_STATUS_SHD) != 0)
        ret = gf_asprintf(&node_name, "%s", "glustershd");
#ifdef BUILD_GNFS
    else if ((cmd & GF_CLI_STATUS_NFS) != 0)
        ret = gf_asprintf(&node_name, "%s", "nfs-server");
#endif
    else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
        ret = gf_asprintf(&node_name, "%s", "quotad");
    else if ((cmd & GF_CLI_STATUS_BITD) != 0)
        ret = gf_asprintf(&node_name, "%s", "bitd");
    else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
        ret = gf_asprintf(&node_name, "%s", "scrubber");

    else {
        ret = -1;
        goto out;
    }
    if (ret == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name");
        goto out;
    }

    node = xlator_search_by_name(any, node_name);
    if (!node) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", node_name);
        goto out;
    }

    if ((cmd & GF_CLI_STATUS_NFS) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else if ((cmd & GF_CLI_STATUS_SHD) != 0)
        ret = gf_asprintf(&subvol_name, "%s-replicate-0", volname);
    else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else if ((cmd & GF_CLI_STATUS_BITD) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else {
        ret = -1;
        goto out;
    }
    if (ret == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name");
        goto out;
    }

    subvol = xlator_search_by_name(node, subvol_name);
    if (!subvol) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
            subvol_name);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to allocate the dictionary");
        goto out;
    }

    switch (cmd & GF_CLI_STATUS_MASK) {
        case GF_CLI_STATUS_MEM:
            ret = 0;
            gf_proc_dump_mem_info_to_dict(output);
            gf_proc_dump_mempool_info_to_dict(ctx, output);
            break;

        case GF_CLI_STATUS_CLIENTS:
            // clients not available for SHD
            if ((cmd & GF_CLI_STATUS_SHD) != 0)
                break;

            ret = dict_set_str(output, "volname", volname);
            if (ret) {
                gf_log(THIS->name, GF_LOG_ERROR,
                    "Error setting volname to dict");
                goto out;
            }
            ret = node->dumpops->priv_to_dict(node, output, NULL);
            break;

        case GF_CLI_STATUS_INODE:
            ret = 0;
            inode_table_dump_to_dict(subvol->itable, "conn0", output);
            ret = dict_set_int32(output, "conncount", 1);
            break;

        case GF_CLI_STATUS_FD:
            // cannot find fd-tables in nfs-server graph
            // TODO: finish once found
            break;

        case GF_CLI_STATUS_CALLPOOL:
            ret = 0;
            gf_proc_dump_pending_frames_to_dict(ctx->pool, output);
            break;

        default:
            ret = -1;
            msg = gf_strdup("Unknown status op");
            gf_log(THIS->name, GF_LOG_ERROR, "%s", msg);
            break;
    }
    rsp.op_ret = ret;
    rsp.op_errno = 0;
    if (ret && msg)
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
        &rsp.output.output_len);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
            "Failed to serialize output dict to rsp");
        goto out;
    }

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;

out:
    if (dict)
        dict_unref(dict);
    if (output)
        dict_unref(output);
    free(node_req.input.input_val);
    if (node_req.dict.dict_val)
        free(node_req.dict.dict_val);
    GF_FREE(msg);
    GF_FREE(rsp.output.output_val);
    GF_FREE(node_name);
    GF_FREE(subvol_name);

    gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
    return ret;
}

static int
glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req nfs_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    dict_t *dict = NULL;
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *any = NULL;
    xlator_t *nfs = NULL;
    xlator_t *subvol = NULL;
    char *volname = NULL;
    dict_t *output = NULL;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &nfs_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    ret = dict_unserialize(nfs_req.input.input_val, nfs_req.input.input_len,
        &dict);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR,
            "Failed to "
            "unserialize req-buffer to dict");
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname");
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    any = active->first;

    // is this needed?
    // are problems possible by searching for subvol directly from "any"?
    nfs = xlator_search_by_name(any, "nfs-server");
    if (!nfs) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR,
            "xlator nfs-server is "
            "not loaded");
        goto out;
    }

    subvol = xlator_search_by_name(nfs, volname);
    if (!subvol) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "xlator %s is no loaded", volname);
        goto out;
    }

    output = dict_new();
    ret = subvol->notify(subvol, GF_EVENT_TRANSLATOR_INFO, dict, output);

    rsp.op_ret = ret;
    rsp.op_errno = 0;
    rsp.op_errstr = "";

    ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
        &rsp.output.output_len);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
            "Failed to serialize output dict to rsp");
        goto out;
    }

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;

out:
    free(nfs_req.input.input_val);
    if (nfs_req.dict.dict_val)
        free(nfs_req.dict.dict_val);
    if (dict)
        dict_unref(dict);
    if (output)
        dict_unref(output);
    GF_FREE(rsp.output.output_val);

    gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
    return ret;
}

static int
glusterfs_handle_volume_barrier_op(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *dict = NULL;
    xlator_t *xlator = NULL;
    xlator_t *any = NULL;
    dict_t *output = NULL;
    char msg[2048] = {0};
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (!active) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    any = active->first;
    ret = xdr_to_generic(req->msg[0], &xlator_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }
    dict = dict_new();
    if (!dict)
        goto out;

    ret = dict_unserialize(xlator_req.input.input_val,
        xlator_req.input.input_len, &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
            "failed to "
            "unserialize req-buffer to dictionary");
        goto out;
    }
    xlator = xlator_search_by_name(any, xlator_req.name);
    if (!xlator) {
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    ret = xlator->notify(xlator, GF_EVENT_VOLUME_BARRIER_OP, dict, output);

    ret = glusterfs_translator_info_response_send(req, ret, msg, output);
out:
    if (dict)
        dict_unref(dict);
    free(xlator_req.input.input_val);  // malloced by xdr
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    free(xlator_req.name);  // malloced by xdr

    return ret;
}

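/* GLUSTERD_BRICK_BARRIER handler: forward the barrier option dict first to
 * features/barrier and then to features/changelog on the named brick,
 * temporarily swapping THIS to the target xlator for each notify call. */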
static int
glusterfs_handle_barrier(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req brick_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp brick_rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *top = NULL;
    xlator_t *xlator = NULL;
    xlator_t *old_THIS = NULL;
    dict_t *dict = NULL;
    gf_boolean_t barrier = _gf_true;
    xlator_list_t *trav;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &brick_req,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);
    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    top = active->first;

    for (trav = top->children; trav; trav = trav->next) {
        if (strcmp(trav->xlator->name, brick_req.name) == 0) {
            break;
        }
    }
    if (!trav) {
        ret = -1;
        goto out;
    }
    top = trav->xlator;

    dict = dict_new();
    if (!dict) {
        ret = -1;
        goto out;
    }

    ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len,
        &dict);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR,
            "Failed to unserialize "
            "request dictionary");
        goto out;
    }

    brick_rsp.op_ret = 0;
    brick_rsp.op_errstr = "";  // initing to prevent serialization failures
    old_THIS = THIS;

    /* Send barrier request to the barrier xlator */
    xlator = get_xlator_by_type(top, "features/barrier");
    if (!xlator) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
            "features/barrier");
        goto out;
    }

    THIS = xlator;
    // TODO: Extend this to accept return of errnos
    ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "barrier notify failed");
        brick_rsp.op_ret = ret;
        brick_rsp.op_errstr = gf_strdup(
            "Failed to reconfigure "
            "barrier.");
        /* This is to invoke changelog-barrier disable if barrier
         * disable fails and don't invoke if barrier enable fails.
         */
        barrier = dict_get_str_boolean(dict, "barrier", _gf_true);
        if (barrier)
            goto submit_reply;
    }

    /* Reset THIS so that we have it correct in case of an error below
     */
    THIS = old_THIS;

    /* Send barrier request to changelog as well */
    xlator = get_xlator_by_type(top, "features/changelog");
    if (!xlator) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
            "features/changelog");
        goto out;
    }

    THIS = xlator;
    ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "changelog notify failed");
        brick_rsp.op_ret = ret;
        brick_rsp.op_errstr = gf_strdup("changelog notify failed");
        goto submit_reply;
    }

submit_reply:
    THIS = old_THIS;

    ret = glusterfs_submit_reply(req, &brick_rsp, NULL, 0, NULL,
        (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);

out:
    if (dict)
        dict_unref(dict);
    free(brick_req.input.input_val);
    if (brick_req.dict.dict_val)
        free(brick_req.dict.dict_val);
    gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
    return ret;
}

static int
glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
{
    int ret = -1;
    /* for now, nothing */
    return ret;
}

static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
    [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
    [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
                             GF_CBK_EVENT_NOTIFY},
    [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP},
};

static struct rpcclnt_cb_program mgmt_cbk_prog = {
    .progname = "GlusterFS Callback",
    .prognum = GLUSTER_CBK_PROGRAM,
    .progver = GLUSTER_CBK_VERSION,
    .actors = mgmt_cbk_actors,
    .numactors = GF_CBK_MAXVALUE,
};

static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
    [GF_PMAP_NULL] = "NULL",
    [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
    [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
    [GF_PMAP_SIGNIN] = "SIGNIN",
    [GF_PMAP_SIGNOUT] = "SIGNOUT",
    [GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};

static rpc_clnt_prog_t clnt_pmap_prog = {
    .progname = "Gluster Portmap",
    .prognum = GLUSTER_PMAP_PROGRAM,
    .progver = GLUSTER_PMAP_VERSION,
    .procnames = clnt_pmap_procs,
};

static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
    [GF_HNDSK_NULL] = "NULL",
    [GF_HNDSK_SETVOLUME] = "SETVOLUME",
    [GF_HNDSK_GETSPEC] = "GETSPEC",
    [GF_HNDSK_PING] = "PING",
    [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};

static rpc_clnt_prog_t clnt_handshake_prog = {
    .progname = "GlusterFS Handshake",
    .prognum = GLUSTER_HNDSK_PROGRAM,
    .progver = GLUSTER_HNDSK_VERSION,
    .procnames = clnt_handshake_procs,
};

static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
    [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL,
                             GLUSTERD_BRICK_NULL, DRC_NA, 0},
    [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL,
                                  GLUSTERD_BRICK_TERMINATE, DRC_NA, 0},
    [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
                                    glusterfs_handle_translator_info_get, NULL,
                                    GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0},
    [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
                                  glusterfs_handle_translator_op, NULL,
                                  GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0},
    [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL,
                               GLUSTERD_BRICK_STATUS, DRC_NA, 0},
    [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
                                      glusterfs_handle_defrag, NULL,
                                      GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0},
    [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile,
                               NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0},
    [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL,
                              GLUSTERD_NODE_STATUS, DRC_NA, 0},
    [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
                                    glusterfs_handle_volume_barrier_op, NULL,
                                    GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0},
    [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL,
                                GLUSTERD_BRICK_BARRIER, DRC_NA, 0},
    [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL,
                              GLUSTERD_NODE_BITROT, DRC_NA, 0},
    [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL,
                               GLUSTERD_BRICK_ATTACH, DRC_NA, 0},

    [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics,
                               NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0},

    [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL,
                             GLUSTERD_SVC_ATTACH, DRC_NA, 0},

    [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL,
                             GLUSTERD_SVC_DETACH, DRC_NA, 0},

};

static struct rpcsvc_program glusterfs_mop_prog = {
    .progname = "Gluster Brick operations",
    .prognum = GD_BRICK_PROGRAM,
    .progver = GD_BRICK_VERSION,
    .actors = glusterfs_actors,
    .numactors = GLUSTERD_BRICK_MAXVALUE,
    .synctask = _gf_true,
};

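/* Serialize 'req' with the given xdrproc (when non-NULL) into an iobuf and
 * submit it on the management RPC connection. If a failure happens before
 * rpc_clnt_submit() is reached, the caller's frame is destroyed here. */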
int
mgmt_submit_request(void *req, call_frame_t *frame, glusterfs_ctx_t *ctx,
    rpc_clnt_prog_t *prog, int procnum, fop_cbk_fn_t cbkfn,
    xdrproc_t xdrproc)
{
    int ret = -1;
    int count = 0;
    struct iovec iov = {
        0,
    };
    struct iobuf *iobuf = NULL;
    struct iobref *iobref = NULL;
    ssize_t xdr_size = 0;
    gf_boolean_t frame_cleanup = _gf_true;

    iobref = iobref_new();
    if (!iobref) {
        goto out;
    }

    if (req) {
        xdr_size = xdr_sizeof(xdrproc, req);

        iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size);
        if (!iobuf) {
            goto out;
        };

        iobref_add(iobref, iobuf);

        iov.iov_base = iobuf->ptr;
        iov.iov_len = iobuf_pagesize(iobuf);

        /* Create the xdr payload */
        ret = xdr_serialize_generic(iov, req, xdrproc);
        if (ret == -1) {
            gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload");
            goto out;
        }
        iov.iov_len = ret;
        count = 1;
    }

    /* Send the msg */
    ret = rpc_clnt_submit(ctx->mgmt, prog, procnum, cbkfn, &iov, count, NULL, 0,
        iobref, frame, NULL, 0, NULL, 0, NULL);

    frame_cleanup = _gf_false;
out:
    if (iobref)
        iobref_unref(iobref);

    if (iobuf)
        iobuf_unref(iobuf);

    if (frame_cleanup)
        STACK_DESTROY(frame->root);

    return ret;
}

static int
mgmt_pmap_signin2_cbk(struct rpc_req *req, struct iovec *iov, int count,
    void *myframe)
{
    pmap_signin_rsp rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    call_frame_t *frame = NULL;
    int ret = 0;

    ctx = glusterfsd_ctx;
    frame = myframe;

    if (-1 == req->rpc_status) {
        ret = -1;
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
    if (ret < 0) {
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    if (-1 == rsp.op_ret) {
        gf_log(frame->this->name, GF_LOG_ERROR,
            "failed to register the port with glusterd");
        ret = -1;
        goto out;
    }

    ret = 0;
out:
    if (need_emancipate)
        emancipate(ctx, ret);

    STACK_DESTROY(frame->root);
    return 0;
}

static int
mgmt_pmap_signin_cbk(struct rpc_req *req, struct iovec *iov, int count,
    void *myframe)
{
    pmap_signin_rsp rsp = {
        0,
    };
    call_frame_t *frame = NULL;
    int ret = 0;
    int emancipate_ret = -1;
    pmap_signin_req pmap_req = {
        0,
    };
    cmd_args_t *cmd_args = NULL;
    glusterfs_ctx_t *ctx = NULL;
    char brick_name[PATH_MAX] = {
        0,
    };
    gf_boolean_t frame_cleanup = _gf_true;

    frame = myframe;
    ctx = glusterfsd_ctx;
    cmd_args = &ctx->cmd_args;

    if (-1 == req->rpc_status) {
        ret = -1;
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
    if (ret < 0) {
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    if (-1 == rsp.op_ret) {
        gf_log(frame->this->name, GF_LOG_ERROR,
            "failed to register the port with glusterd");
        ret = -1;
        goto out;
    }

    if (!cmd_args->brick_port2) {
        /* We are done with signin process */
        emancipate_ret = 0;
        goto out;
    }

    snprintf(brick_name, PATH_MAX, "%s.rdma", cmd_args->brick_name);
    pmap_req.port = cmd_args->brick_port2;
    pmap_req.brick = brick_name;

    ret = mgmt_submit_request(&pmap_req, frame, ctx, &clnt_pmap_prog,
        GF_PMAP_SIGNIN, mgmt_pmap_signin2_cbk,
        (xdrproc_t)xdr_pmap_signin_req);
    frame_cleanup = _gf_false;
    if (ret)
        goto out;

    return 0;

out:
    if (need_emancipate && (ret < 0 || !cmd_args->brick_port2))
        emancipate(ctx, emancipate_ret);

    if (frame_cleanup)
        STACK_DESTROY(frame->root);

    return 0;
}

static int
glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
{
    call_frame_t *frame = NULL;
    xlator_list_t **trav_p;
    xlator_t *top;
    pmap_signin_req req = {
        0,
    };
    int ret = -1;
    int emancipate_ret = -1;
    cmd_args_t *cmd_args = NULL;

    cmd_args = &ctx->cmd_args;

    if (!cmd_args->brick_port || !cmd_args->brick_name) {
        gf_log("fsd-mgmt", GF_LOG_DEBUG,
            "portmapper signin arguments not given");
        emancipate_ret = 0;
        goto out;
    }

    req.port = cmd_args->brick_port;
    req.pid = (int)getpid(); /* only glusterd2 consumes this */

    if (ctx->active) {
        top = ctx->active->first;
        for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
            frame = create_frame(THIS, ctx->pool);
            req.brick = (*trav_p)->xlator->name;
            ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog,
                GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
                (xdrproc_t)xdr_pmap_signin_req);
            if (ret < 0) {
                gf_log(THIS->name, GF_LOG_WARNING,
                    "failed to send sign in request; brick = %s", req.brick);
            }
        }
    }

    /* unfortunately, the caller doesn't care about the returned value */

out:
    if (need_emancipate && ret < 0)
        emancipate(ctx, emancipate_ret);
    return ret;
}

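/* Callback for the GETSPEC request to glusterd: pick up an updated list of
 * volfile servers from xdata if present, then either route the volfile
 * through mgmt_process_volfile() (multiplexed daemons) or stage it in a
 * temporary file and reconfigure or rebuild the local graph depending on
 * whether only options or the topology changed. */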
2274static int2275mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,2276void *myframe)2277{
2278gf_getspec_rsp rsp = {22790,2280};2281call_frame_t *frame = NULL;2282glusterfs_ctx_t *ctx = NULL;2283int ret = 0, locked = 0;2284ssize_t size = 0;2285FILE *tmpfp = NULL;2286char *volfile_id = NULL;2287gf_volfile_t *volfile_obj = NULL;2288gf_volfile_t *volfile_tmp = NULL;2289char sha256_hash[SHA256_DIGEST_LENGTH] = {22900,2291};2292dict_t *dict = NULL;2293char *servers_list = NULL;2294int tmp_fd = -1;2295char template[] = "/tmp/glfs.volfile.XXXXXX";2296
2297frame = myframe;2298ctx = frame->this->ctx;2299
2300if (-1 == req->rpc_status) {2301ret = -1;2302goto out;2303}2304
2305ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);2306if (ret < 0) {2307gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");2308ret = -1;2309goto out;2310}2311
2312if (-1 == rsp.op_ret) {2313gf_log(frame->this->name, GF_LOG_ERROR,2314"failed to get the 'volume file' from server");2315ret = rsp.op_errno;2316goto out;2317}2318
2319if (!rsp.xdata.xdata_len) {2320goto volfile;2321}2322
2323dict = dict_new();2324if (!dict) {2325ret = -1;2326errno = ENOMEM;2327goto out;2328}2329
2330ret = dict_unserialize(rsp.xdata.xdata_val, rsp.xdata.xdata_len, &dict);2331if (ret) {2332gf_log(frame->this->name, GF_LOG_ERROR,2333"failed to unserialize xdata to dictionary");2334goto out;2335}2336dict->extra_stdfree = rsp.xdata.xdata_val;2337
2338ret = dict_get_str(dict, "servers-list", &servers_list);2339if (ret) {2340/* Server list is set by glusterd at the time of getspec */2341ret = dict_get_str(dict, GLUSTERD_BRICK_SERVERS, &servers_list);2342if (ret)2343goto volfile;2344}2345
2346gf_log(frame->this->name, GF_LOG_INFO,2347"Received list of available volfile servers: %s", servers_list);2348
2349ret = gf_process_getspec_servers_list(&ctx->cmd_args, servers_list);2350if (ret) {2351gf_log(frame->this->name, GF_LOG_ERROR,2352"Failed (%s) to process servers list: %s", strerror(errno),2353servers_list);2354}2355
2356volfile:2357size = rsp.op_ret;2358volfile_id = frame->local;2359if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {2360ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,2361dict);2362goto post_graph_mgmt;2363}2364
2365ret = 0;2366glusterfs_compute_sha256((const unsigned char *)rsp.spec, size,2367sha256_hash);2368
2369LOCK(&ctx->volfile_lock);2370{2371locked = 1;2372
2373list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)2374{2375if (!strcmp(volfile_id, volfile_obj->vol_id)) {2376if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,2377sizeof(volfile_obj->volfile_checksum))) {2378UNLOCK(&ctx->volfile_lock);2379gf_log(frame->this->name, GF_LOG_INFO,2380"No change in volfile,"2381"continuing");2382goto post_unlock;2383}2384volfile_tmp = volfile_obj;2385break;2386}2387}2388
        /* coverity[secure_temp] mkstemp uses 0600 as the mode */
        tmp_fd = mkstemp(template);
        if (-1 == tmp_fd) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
                    "create template=%s", template, NULL);
            ret = -1;
            goto post_unlock;
        }

        /* Calling unlink so that when the file is closed or program
         * terminates the temporary file is deleted.
         */
        ret = sys_unlink(template);
        if (ret < 0) {
            gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
                    "delete template=%s", template, NULL);
            ret = 0;
        }

        tmpfp = fdopen(tmp_fd, "w+b");
        if (!tmpfp) {
            ret = -1;
            goto out;
        }

        fwrite(rsp.spec, size, 1, tmpfp);
        fflush(tmpfp);
        if (ferror(tmpfp)) {
            ret = -1;
            goto out;
        }

        /* Check if only options have changed. No need to reload the
         * volfile if the topology hasn't changed.
         * glusterfs_volfile_reconfigure() has 3 possible return states:
         *   return 0           ===> reconfiguration of options has succeeded
         *   return 1           ===> the graph has to be reconstructed and all
         *                           the xlators should be inited
         *   return -1 (or -ve) ===> some internal error occurred during the
         *                           operation
         */

        ret = glusterfs_volfile_reconfigure(tmpfp, ctx);
        if (ret == 0) {
            gf_log("glusterfsd-mgmt", GF_LOG_DEBUG,
                   "No need to re-load volfile, reconfigure done");
            if (!volfile_tmp) {
                ret = -1;
                UNLOCK(&ctx->volfile_lock);
                gf_log("mgmt", GF_LOG_ERROR,
                       "Graph reconfigure succeeded without having a "
                       "checksum.");
                goto post_unlock;
            }
            memcpy(volfile_tmp->volfile_checksum, sha256_hash,
                   sizeof(volfile_tmp->volfile_checksum));
            goto out;
        }

        if (ret < 0) {
            UNLOCK(&ctx->volfile_lock);
            gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!");
            goto post_unlock;
        }

        ret = glusterfs_process_volfp(ctx, tmpfp);
        /* tmpfp closed */
        tmpfp = NULL;
        tmp_fd = -1;
        if (ret)
            goto out;

        if (!volfile_tmp) {
            volfile_tmp = GF_CALLOC(1, sizeof(gf_volfile_t),
                                    gf_common_volfile_t);
            if (!volfile_tmp) {
                ret = -1;
                goto out;
            }

            INIT_LIST_HEAD(&volfile_tmp->volfile_list);
            volfile_tmp->graph = ctx->active;
            list_add(&volfile_tmp->volfile_list, &ctx->volfile_list);
            snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s",
                     volfile_id);
        }
        memcpy(volfile_tmp->volfile_checksum, sha256_hash,
               sizeof(volfile_tmp->volfile_checksum));
    }
    UNLOCK(&ctx->volfile_lock);

    locked = 0;

post_graph_mgmt:
    if (!is_mgmt_rpc_reconnect) {
        need_emancipate = _gf_true;
        glusterfs_mgmt_pmap_signin(ctx);
        is_mgmt_rpc_reconnect = _gf_true;
    }

out:

    if (locked)
        UNLOCK(&ctx->volfile_lock);
post_unlock:
    GF_FREE(frame->local);
    frame->local = NULL;
    STACK_DESTROY(frame->root);
    free(rsp.spec);

    if (dict)
        dict_unref(dict);

    // Stop if server is running at an unsupported op-version
    if (ENOTSUP == ret) {
        gf_log("mgmt", GF_LOG_ERROR,
               "Server is operating at an "
               "op-version which is not supported");
        cleanup_and_exit(0);
    }

    if (ret && ctx && !ctx->active) {
        /* Do it only for the first time */
        /* Failed to get the volume file, something wrong,
           restart the process */
        gf_log("mgmt", GF_LOG_ERROR, "failed to fetch volume file (key:%s)",
               ctx->cmd_args.volfile_id);
        emancipate(ctx, ret);
        cleanup_and_exit(0);
    }

    if (tmpfp)
        fclose(tmpfp);
    else if (tmp_fd != -1)
        sys_close(tmp_fd);

    return 0;
}

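/* Build and submit a GF_HNDSK_GETSPEC request for the given volfile_id
 * (falling back to cmd_args.volfile_id when none is passed). The request
 * carries the supported min/max op-versions and, for bricks, the brick name
 * in its xdata dictionary; mgmt_getspec_cbk handles the reply. */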
static int
glusterfs_volfile_fetch_one(glusterfs_ctx_t *ctx, char *volfile_id)
{
    cmd_args_t *cmd_args = NULL;
    gf_getspec_req req = {
        0,
    };
    int ret = 0;
    call_frame_t *frame = NULL;
    dict_t *dict = NULL;

    cmd_args = &ctx->cmd_args;
    if (!volfile_id) {
        volfile_id = ctx->cmd_args.volfile_id;
        if (!volfile_id) {
            gf_log(THIS->name, GF_LOG_ERROR,
                   "No volfile-id provided, erroring out");
            return -1;
        }
    }

    frame = create_frame(THIS, ctx->pool);
    if (!frame) {
        ret = -1;
        goto out;
    }

    req.key = volfile_id;
    req.flags = 0;
    /*
     * We are only storing one variable in local, hence using the same
     * variable. If multiple local variables are required, create a struct.
     */
    frame->local = gf_strdup(volfile_id);
    if (!frame->local) {
        ret = -1;
        goto out;
    }

    dict = dict_new();
    if (!dict) {
        ret = -1;
        goto out;
    }

    // Set the supported min and max op-versions, so glusterd can make a
    // decision
    ret = dict_set_int32(dict, "min-op-version", GD_OP_VERSION_MIN);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to set min-op-version"
               " in request dict");
        goto out;
    }

    ret = dict_set_int32(dict, "max-op-version", GD_OP_VERSION_MAX);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to set max-op-version"
               " in request dict");
        goto out;
    }

    /* Ask for a list of volfile (glusterd2 only) servers */
    if (GF_CLIENT_PROCESS == ctx->process_mode) {
        req.flags = req.flags | GF_GETSPEC_FLAG_SERVERS_LIST;
    }

    if (cmd_args->brick_name) {
        ret = dict_set_dynstr_with_alloc(dict, "brick_name",
                                         cmd_args->brick_name);
        if (ret) {
            gf_log(THIS->name, GF_LOG_ERROR,
                   "Failed to set brick_name in request dict");
            goto out;
        }
    }

    ret = dict_allocate_and_serialize(dict, &req.xdata.xdata_val,
                                      &req.xdata.xdata_len);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary");
        goto out;
    }

    ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
                              GF_HNDSK_GETSPEC, mgmt_getspec_cbk,
                              (xdrproc_t)xdr_gf_getspec_req);

    /* In case of error the frame will be destroyed by rpc_clnt_submit */
    frame = NULL;
out:
    GF_FREE(req.xdata.xdata_val);
    if (dict)
        dict_unref(dict);
    if (ret && frame) {
        /* Free the frame->local fast, because we have not used memget */
        GF_FREE(frame->local);
        frame->local = NULL;
        STACK_DESTROY(frame->root);
    }

    return ret;
}

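/* Fetch the volfile(s) this process needs: one per attached volfile for
 * multiplexed daemons, one per child of the protocol/server xlator for
 * brick processes, or the single cmd_args.volfile_id otherwise. */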
int
glusterfs_volfile_fetch(glusterfs_ctx_t *ctx)
{
    xlator_t *server_xl = NULL;
    xlator_list_t *trav;
    gf_volfile_t *volfile_obj = NULL;
    int ret = 0;

    LOCK(&ctx->volfile_lock);
    {
        if (ctx->active &&
            mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
            list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
            {
                ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id);
            }
            UNLOCK(&ctx->volfile_lock);
            return ret;
        }

        if (ctx->active) {
            server_xl = ctx->active->first;
            if (strcmp(server_xl->type, "protocol/server") != 0) {
                server_xl = NULL;
            }
        }
        if (!server_xl) {
            /* Startup (ctx->active not set) or non-server. */
            UNLOCK(&ctx->volfile_lock);
            return glusterfs_volfile_fetch_one(ctx, ctx->cmd_args.volfile_id);
        }

        ret = 0;
        for (trav = server_xl->children; trav; trav = trav->next) {
            ret |= glusterfs_volfile_fetch_one(ctx, trav->xlator->volfile_id);
        }
    }
    UNLOCK(&ctx->volfile_lock);
    return ret;
}

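/* Callback for the GF_HNDSK_EVENT_NOTIFY request sent by
 * glusterfs_rebalance_event_notify(): decodes the reply and logs any error
 * reported by the server. */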
static int32_t
glusterfs_rebalance_event_notify_cbk(struct rpc_req *req, struct iovec *iov,
                                     int count, void *myframe)
{
    gf_event_notify_rsp rsp = {
        0,
    };
    call_frame_t *frame = NULL;
    int ret = 0;

    frame = myframe;

    if (-1 == req->rpc_status) {
        gf_log(frame->this->name, GF_LOG_ERROR,
               "failed to get the rsp from server");
        ret = -1;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
    if (ret < 0) {
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");
        ret = -1;
        goto out;
    }

    if (-1 == rsp.op_ret) {
        gf_log(frame->this->name, GF_LOG_ERROR,
               "Received error (%s) from server", strerror(rsp.op_errno));
        ret = -1;
        goto out;
    }
out:
    free(rsp.dict.dict_val); // malloced by xdr

    if (frame) {
        STACK_DESTROY(frame->root);
    }

    return ret;
}

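/* Notify glusterd of the rebalance (defrag) status: tags the status dict
 * with the volume name, serializes it, and submits it as a
 * GF_EN_DEFRAG_STATUS event over the handshake program. */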
static int32_t
glusterfs_rebalance_event_notify(dict_t *dict)
{
    glusterfs_ctx_t *ctx = NULL;
    gf_event_notify_req req = {
        0,
    };
    int32_t ret = -1;
    cmd_args_t *cmd_args = NULL;
    call_frame_t *frame = NULL;

    ctx = glusterfsd_ctx;
    cmd_args = &ctx->cmd_args;

    frame = create_frame(THIS, ctx->pool);

    req.op = GF_EN_DEFRAG_STATUS;

    if (dict) {
        ret = dict_set_str(dict, "volname", cmd_args->volfile_id);
        if (ret) {
            gf_log("", GF_LOG_ERROR, "failed to set volname");
        }
        ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
                                          &req.dict.dict_len);
        if (ret) {
            gf_log("", GF_LOG_ERROR, "failed to serialize dict");
        }
    }

    ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
                              GF_HNDSK_EVENT_NOTIFY,
                              glusterfs_rebalance_event_notify_cbk,
                              (xdrproc_t)xdr_gf_event_notify_req);

    GF_FREE(req.dict.dict_val);
    return ret;
}

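/* RPC notification handler for the glusterd connection. On disconnect it
 * walks the configured volfile servers (unless this is a brick tied to the
 * local portmapper) and points the transport at the next one; on (re)connect
 * it fetches the volfile and, if needed, repeats the portmap signin. */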
static int
mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
                void *data)
{
    xlator_t *this = NULL;
    glusterfs_ctx_t *ctx = NULL;
    int ret = 0;
    server_cmdline_t *server = NULL;
    rpc_transport_t *rpc_trans = NULL;
    int need_term = 0;
    int emval = 0;
    static int log_ctr1;
    static int log_ctr2;
    struct dnscache6 *dnscache = NULL;

    this = mydata;
    rpc_trans = rpc->conn.trans;
    ctx = this->ctx;

    switch (event) {
        case RPC_CLNT_DISCONNECT:
            if (rpc_trans->connect_failed) {
                GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_ERROR,
                                    "failed to connect to remote-host: %s",
                                    ctx->cmd_args.volfile_server);
            } else {
                GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_INFO,
                                    "disconnected from remote-host: %s",
                                    ctx->cmd_args.volfile_server);
            }

            if (!rpc->disabled) {
                /*
                 * Stay with the current volfile server until its dnscache
                 * entries are exhausted before moving on.
                 */
                dnscache = rpc_trans->dnscache;
                if (dnscache && dnscache->next) {
                    break;
                }
            }
            server = ctx->cmd_args.curr_server;

            if (ctx->cmd_args.brick_port && ctx->cmd_args.brick_name) {
                /* This process requires a portmap signin with glusterd.
                 * Currently the glusterd portmaps are local to each glusterd,
                 * so connecting this process to a different volfile server
                 * won't work well. Don't try to connect to a backup volfile
                 * server here.
                 */
                if (!ctx->active) {
                    need_term = 1;
                }
                emval = ENOTCONN;
                GF_LOG_OCCASIONALLY(log_ctr2, "glusterfsd-mgmt", GF_LOG_INFO,
                                    "Port-mapper is active, giving up on the "
                                    "backup volfile servers");
                break;
            }
            if (server->list.next == &ctx->cmd_args.volfile_servers) {
                if (!ctx->active) {
                    need_term = 1;
                    gf_log("glusterfsd-mgmt", GF_LOG_INFO,
                           "Exhausted all volfile servers, exiting");
                    emval = ENOTCONN;
                    break;
                } else {
                    server = list_first_entry(&ctx->cmd_args.volfile_servers,
                                              typeof(*server), list);
                    emval = ENOTCONN;
                    GF_LOG_OCCASIONALLY(
                        log_ctr2, "glusterfsd-mgmt", GF_LOG_INFO,
                        "Exhausted all volfile servers, retrying from the "
                        "beginning");
                }
            } else {
                server = list_entry(server->list.next, typeof(*server), list);
            }
            ctx->cmd_args.curr_server = server;
            ctx->cmd_args.volfile_server = server->volfile_server;

            ret = dict_set_str(rpc_trans->options, "remote-host",
                               server->volfile_server);
            if (ret != 0) {
                gf_log("glusterfsd-mgmt", GF_LOG_ERROR,
                       "failed to set remote-host: %s",
                       server->volfile_server);
                if (!ctx->active) {
                    need_term = 1;
                }
                emval = ENOTCONN;
                break;
            }
            gf_log("glusterfsd-mgmt", GF_LOG_INFO,
                   "connecting to next volfile server %s",
                   server->volfile_server);
            break;
        case RPC_CLNT_CONNECT:
            ret = glusterfs_volfile_fetch(ctx);
            if (ret) {
                emval = ret;
                if (!ctx->active) {
                    need_term = 1;
                    gf_log("glusterfsd-mgmt", GF_LOG_ERROR,
                           "failed to fetch volume file (key:%s)",
                           ctx->cmd_args.volfile_id);
                    break;
                }
            }

            if (is_mgmt_rpc_reconnect)
                glusterfs_mgmt_pmap_signin(ctx);

            break;
        default:
            break;
    }

    if (need_term) {
        emancipate(ctx, emval);
        cleanup_and_exit(1);
    }

    return 0;
}

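/* No-op notify handler for the local rpcsvc listener created in
 * glusterfs_listener_init(). */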
static int
glusterfs_rpcsvc_notify(rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
                        void *data)
{
    return 0;
}

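/* Create the local unix-domain RPC listener on cmd_args->sock_file (if one
 * was requested) and register the glusterfs_mop_prog program on it. Does
 * nothing if a listener already exists. */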
int
glusterfs_listener_init(glusterfs_ctx_t *ctx)
{
    cmd_args_t *cmd_args = NULL;
    rpcsvc_t *rpc = NULL;
    dict_t *options = NULL;
    int ret = -1;

    cmd_args = &ctx->cmd_args;

    if (ctx->listener)
        return 0;

    if (!cmd_args->sock_file)
        return 0;

    options = dict_new();
    if (!options)
        goto out;

    ret = rpcsvc_transport_unix_options_build(options, cmd_args->sock_file);
    if (ret)
        goto out;

    rpc = rpcsvc_init(THIS, ctx, options, 8);
    if (rpc == NULL) {
        goto out;
    }

    ret = rpcsvc_register_notify(rpc, glusterfs_rpcsvc_notify, THIS);
    if (ret) {
        goto out;
    }

    ret = rpcsvc_create_listeners(rpc, options, "glusterfsd");
    if (ret < 1) {
        goto out;
    }

    ret = rpcsvc_program_register(rpc, &glusterfs_mop_prog, _gf_false);
    if (ret) {
        goto out;
    }

    ctx->listener = rpc;

out:
    if (options)
        dict_unref(options);
    return ret;
}

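/* Dispatch a management notification towards glusterd. Only
 * GF_EN_DEFRAG_STATUS is handled at present; it forwards the rebalance
 * status dict via glusterfs_rebalance_event_notify(). */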
int
glusterfs_mgmt_notify(int32_t op, void *data, ...)
{
    int ret = 0;
    switch (op) {
        case GF_EN_DEFRAG_STATUS:
            ret = glusterfs_rebalance_event_notify((dict_t *)data);
            break;

        default:
            gf_log("", GF_LOG_ERROR, "Invalid op");
            break;
    }

    return ret;
}

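/* Set up the RPC client towards the volfile server (glusterd): build unix or
 * inet transport options, enable SSL when secure management is configured,
 * register mgmt_rpc_notify and the mgmt callback program, and start the
 * connection. */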
int
glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
{
    cmd_args_t *cmd_args = NULL;
    struct rpc_clnt *rpc = NULL;
    dict_t *options = NULL;
    int ret = -1;
    int port = GF_DEFAULT_BASE_PORT;
    char *host = NULL;
    xlator_cmdline_option_t *opt = NULL;

    cmd_args = &ctx->cmd_args;
    GF_VALIDATE_OR_GOTO(THIS->name, cmd_args->volfile_server, out);

    if (ctx->mgmt)
        return 0;

    options = dict_new();
    if (!options)
        goto out;

    if (cmd_args->volfile_server_port)
        port = cmd_args->volfile_server_port;

    host = cmd_args->volfile_server;

    if (cmd_args->volfile_server_transport &&
        !strcmp(cmd_args->volfile_server_transport, "unix")) {
        ret = rpc_transport_unix_options_build(options, host, 0);
    } else {
        opt = find_xlator_option_in_cmd_args_t("address-family", cmd_args);
        ret = rpc_transport_inet_options_build(options, host, port,
                                               (opt ? opt->value : NULL));
    }
    if (ret)
        goto out;

    /* Explicitly turn on encrypted transport. */
    if (ctx->secure_mgmt) {
        ret = dict_set_dynstr_with_alloc(options,
                                         "transport.socket.ssl-enabled",
                                         "yes");
        if (ret) {
            gf_log(THIS->name, GF_LOG_ERROR,
                   "failed to set 'transport.socket.ssl-enabled' "
                   "in options dict");
            goto out;
        }

        ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
    }

    rpc = rpc_clnt_new(options, THIS, THIS->name, 8);
    if (!rpc) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_WARNING, "failed to create rpc clnt");
        goto out;
    }

    ret = rpc_clnt_register_notify(rpc, mgmt_rpc_notify, THIS);
    if (ret) {
        gf_log(THIS->name, GF_LOG_WARNING,
               "failed to register notify function");
        goto out;
    }

    ret = rpcclnt_cbk_program_register(rpc, &mgmt_cbk_prog, THIS);
    if (ret) {
        gf_log(THIS->name, GF_LOG_WARNING,
               "failed to register callback function");
        goto out;
    }

    ctx->notify = glusterfs_mgmt_notify;

    /* This value should be set before doing the 'rpc_clnt_start()' as
       the notify function uses this variable */
    ctx->mgmt = rpc;

    ret = rpc_clnt_start(rpc);
out:
    if (options)
        dict_unref(options);
    return ret;
}