/* Web-page header (hosting-site chrome captured by the scrape, not part of
 * the original source): glusterfs fork — glusterd-shd-svc.c, 783 lines,
 * 23.0 KB. */
1
/*
2
   Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
3
   This file is part of GlusterFS.
4

5
   This file is licensed to you under your choice of the GNU Lesser
6
   General Public License, version 3 or any later version (LGPLv3 or
7
   later), or the GNU General Public License, version 2 (GPLv2), in all
8
   cases as published by the Free Software Foundation.
9
*/
10

11
#include <glusterfs/globals.h>
12
#include <glusterfs/run.h>
13
#include "glusterd-utils.h"
14
#include "glusterd-volgen.h"
15
#include "glusterd-shd-svc.h"
16
#include "glusterd-shd-svc-helper.h"
17
#include "glusterd-svc-helper.h"
18
#include "glusterd-store.h"
19

20
#define GD_SHD_PROCESS_NAME "--process-name"
21
char *shd_svc_name = "glustershd";
22

23
void
24
glusterd_shdsvc_build(glusterd_svc_t *svc)
25
{
26
    int ret = -1;
27
    ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
28
    if (ret < 0)
29
        return;
30

31
    CDS_INIT_LIST_HEAD(&svc->mux_svc);
32
    svc->manager = glusterd_shdsvc_manager;
33
    svc->start = glusterd_shdsvc_start;
34
    svc->stop = glusterd_shdsvc_stop;
35
    svc->reconfigure = glusterd_shdsvc_reconfigure;
36
}
37

38
int
39
glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn,
40
                     glusterd_svc_proc_t *mux_svc)
41
{
42
    int ret = -1;
43
    char rundir[PATH_MAX] = {
44
        0,
45
    };
46
    char sockpath[PATH_MAX] = {
47
        0,
48
    };
49
    char pidfile[PATH_MAX] = {
50
        0,
51
    };
52
    char volfile[PATH_MAX] = {
53
        0,
54
    };
55
    char logdir[PATH_MAX] = {
56
        0,
57
    };
58
    char logfile[PATH_MAX] = {
59
        0,
60
    };
61
    char volfileid[256] = {0};
62
    glusterd_svc_t *svc = NULL;
63
    glusterd_volinfo_t *volinfo = NULL;
64
    glusterd_conf_t *priv = NULL;
65
    glusterd_muxsvc_conn_notify_t notify = NULL;
66
    xlator_t *this = THIS;
67
    char *volfileserver = NULL;
68
    int32_t len = 0;
69

70
    priv = this->private;
71
    GF_VALIDATE_OR_GOTO(this->name, priv, out);
72

73
    volinfo = data;
74
    GF_VALIDATE_OR_GOTO(this->name, data, out);
75
    GF_VALIDATE_OR_GOTO(this->name, mux_svc, out);
76

77
    svc = &(volinfo->shd.svc);
78

79
    ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name);
80
    if (ret < 0)
81
        goto out;
82

83
    notify = glusterd_muxsvc_common_rpc_notify;
84
    glusterd_store_perform_node_state_store(volinfo);
85

86
    GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv);
87
    glusterd_svc_create_rundir(rundir);
88

89
    glusterd_svc_build_logfile_path(shd_svc_name, priv->logdir, logfile,
90
                                    sizeof(logfile));
91

92
    /* Initialize the connection mgmt */
93
    if (mux_conn && mux_svc->rpc) {
94
        /* This will be unrefed from glusterd_shd_svcproc_cleanup*/
95
        svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc);
96
        ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s",
97
                       mux_conn->sockpath);
98
        if (ret < 0)
99
            goto out;
100
    } else {
101
        ret = mkdir_p(priv->logdir, 0755, _gf_true);
102
        if ((ret == -1) && (EEXIST != errno)) {
103
            gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED,
104
                   "Unable to create logdir %s", logdir);
105
            goto out;
106
        }
107

108
        glusterd_svc_build_shd_socket_filepath(volinfo, sockpath,
109
                                               sizeof(sockpath));
110
        ret = glusterd_muxsvc_conn_init(&(svc->conn), mux_svc, sockpath, 600,
111
                                        notify);
112
        if (ret)
113
            goto out;
114
        /* This will be unrefed when the last svcs is detached from the list */
115
        if (!mux_svc->rpc)
116
            mux_svc->rpc = rpc_clnt_ref(svc->conn.rpc);
117
    }
118

119
    /* Initialize the process mgmt */
120
    glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
121
    glusterd_svc_build_shd_volfile_path(volinfo, volfile, PATH_MAX);
122
    len = snprintf(volfileid, sizeof(volfileid), "shd/%s", volinfo->volname);
123
    if ((len < 0) || (len >= sizeof(volfileid))) {
124
        ret = -1;
125
        goto out;
126
    }
127

128
    if (dict_get_str(this->options, "transport.socket.bind-address",
129
                     &volfileserver) != 0) {
130
        volfileserver = "localhost";
131
    }
132
    ret = glusterd_proc_init(&(svc->proc), shd_svc_name, pidfile, logdir,
133
                             logfile, volfile, volfileid, volfileserver);
134
    if (ret)
135
        goto out;
136

137
out:
138
    gf_msg_debug(this->name, 0, "Returning %d", ret);
139
    return ret;
140
}
141

142
int
143
glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo)
144
{
145
    char filepath[PATH_MAX] = {
146
        0,
147
    };
148

149
    int ret = -1;
150
    dict_t *mod_dict = NULL;
151
    xlator_t *this = THIS;
152

153
    glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX);
154
    if (!glusterd_is_shd_compatible_volume(volinfo)) {
155
        /* If volfile exist, delete it. This case happens when we
156
         * change from replica/ec to distribute.
157
         */
158
        gf_unlink(filepath);
159
        ret = 0;
160
        goto out;
161
    }
162
    mod_dict = dict_new();
163
    if (!mod_dict) {
164
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
165
        goto out;
166
    }
167

168
    ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0);
169
    if (ret) {
170
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
171
                "Key=cluster.background-self-heal-count", NULL);
172
        goto out;
173
    }
174

175
    ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on");
176
    if (ret) {
177
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
178
                "Key=cluster.data-self-heal", NULL);
179
        goto out;
180
    }
181

182
    ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on");
183
    if (ret) {
184
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
185
                "Key=cluster.metadata-self-heal", NULL);
186
        goto out;
187
    }
188

189
    ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on");
190
    if (ret) {
191
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
192
                "Key=cluster.entry-self-heal", NULL);
193
        goto out;
194
    }
195

196
    ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict);
197
    if (ret) {
198
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
199
               "Failed to create volfile");
200
        goto out;
201
    }
202

203
out:
204
    if (mod_dict)
205
        dict_unref(mod_dict);
206
    gf_msg_debug(this->name, 0, "Returning %d", ret);
207

208
    return ret;
209
}
210

211
gf_boolean_t
212
glusterd_svcs_shd_compatible_volumes_stopped(glusterd_svc_t *svc)
213
{
214
    glusterd_svc_proc_t *svc_proc = NULL;
215
    glusterd_shdsvc_t *shd = NULL;
216
    glusterd_svc_t *temp_svc = NULL;
217
    glusterd_volinfo_t *volinfo = NULL;
218
    gf_boolean_t comp = _gf_false;
219
    glusterd_conf_t *conf = THIS->private;
220

221
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
222
    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
223
    pthread_mutex_lock(&conf->attach_lock);
224
    {
225
        svc_proc = svc->svc_proc;
226
        if (!svc_proc)
227
            goto unlock;
228
        cds_list_for_each_entry(temp_svc, &svc_proc->svcs, mux_svc)
229
        {
230
            /* Get volinfo->shd from svc object */
231
            shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
232
            if (!shd) {
233
                gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
234
                       "Failed to get shd object "
235
                       "from shd service");
236
                goto unlock;
237
            }
238

239
            /* Get volinfo from shd */
240
            volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
241
            if (!volinfo) {
242
                gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
243
                       "Failed to get volinfo from "
244
                       "from shd");
245
                goto unlock;
246
            }
247
            if (!glusterd_is_shd_compatible_volume(volinfo))
248
                continue;
249
            if (volinfo->status == GLUSTERD_STATUS_STARTED)
250
                goto unlock;
251
        }
252
        comp = _gf_true;
253
    }
254
unlock:
255
    pthread_mutex_unlock(&conf->attach_lock);
256
out:
257
    return comp;
258
}
259

260
int
261
glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags)
262
{
263
    int ret = -1;
264
    glusterd_volinfo_t *volinfo = NULL;
265
    glusterd_conf_t *conf = NULL;
266
    gf_boolean_t shd_restart = _gf_false;
267

268
    conf = THIS->private;
269
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
270
    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
271
    volinfo = data;
272
    GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
273

274
    if (volinfo->is_snap_volume) {
275
        /* healing of a snap volume is not supported yet*/
276
        ret = 0;
277
        goto out;
278
    }
279

280
    while (conf->restart_shd) {
281
        synccond_wait(&conf->cond_restart_shd, &conf->big_lock);
282
    }
283
    conf->restart_shd = _gf_true;
284
    shd_restart = _gf_true;
285

286
    if (volinfo)
287
        glusterd_volinfo_ref(volinfo);
288

289
    if (!glusterd_is_shd_compatible_volume(volinfo)) {
290
        ret = 0;
291
        if (svc->inited) {
292
            /* This means glusterd was running for this volume and now
293
             * it was converted to a non-shd volume. So just stop the shd
294
             */
295
            ret = svc->stop(svc, SIGTERM);
296
        }
297
        goto out;
298
    }
299
    ret = glusterd_shdsvc_create_volfile(volinfo);
300
    if (ret)
301
        goto out;
302

303
    ret = glusterd_shd_svc_mux_init(volinfo, svc);
304
    if (ret) {
305
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
306
               "Failed to init shd service");
307
        goto out;
308
    }
309

310
    /* If all the volumes are stopped or all shd compatible volumes
311
     * are stopped then stop the service if:
312
     * - volinfo is NULL or
313
     * - volinfo is present and volume is shd compatible
314
     * Otherwise create volfile and restart service if:
315
     * - volinfo is NULL or
316
     * - volinfo is present and volume is shd compatible
317
     */
318
    if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) {
319
        /* TODO
320
         * Take a lock and detach all svc's to stop the process
321
         * also reset the init flag
322
         */
323
        ret = svc->stop(svc, SIGTERM);
324
    } else if (volinfo) {
325
        if (volinfo->status != GLUSTERD_STATUS_STARTED) {
326
            ret = svc->stop(svc, SIGTERM);
327
            if (ret)
328
                goto out;
329
        }
330
        if (volinfo->status == GLUSTERD_STATUS_STARTED) {
331
            ret = svc->start(svc, flags);
332
            if (ret)
333
                goto out;
334
        }
335
    }
336
out:
337
    if (shd_restart) {
338
        conf->restart_shd = _gf_false;
339
        synccond_broadcast(&conf->cond_restart_shd);
340
    }
341
    if (volinfo)
342
        glusterd_volinfo_unref(volinfo);
343
    if (ret)
344
        gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
345
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
346

347
    return ret;
348
}
349

350
int
351
glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags)
352
{
353
    int ret = -1;
354
    char glusterd_uuid_option[PATH_MAX] = {0};
355
    char client_pid[32] = {0};
356
    dict_t *cmdline = NULL;
357
    xlator_t *this = THIS;
358

359
    cmdline = dict_new();
360
    if (!cmdline)
361
        goto out;
362

363
    ret = snprintf(glusterd_uuid_option, sizeof(glusterd_uuid_option),
364
                   "*replicate*.node-uuid=%s", uuid_utoa(MY_UUID));
365
    if (ret < 0)
366
        goto out;
367

368
    ret = snprintf(client_pid, sizeof(client_pid), "--client-pid=%d",
369
                   GF_CLIENT_PID_SELF_HEALD);
370
    if (ret < 0)
371
        goto out;
372

373
    ret = dict_set_str(cmdline, "arg", client_pid);
374
    if (ret < 0) {
375
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
376
                "Key=arg", NULL);
377
        goto out;
378
    }
379

380
    /* Pass cmdline arguments as key-value pair. The key is merely
381
     * a carrier and is not used. Since dictionary follows LIFO the value
382
     * should be put in reverse order*/
383
    ret = dict_set_str(cmdline, "arg4", svc->name);
384
    if (ret) {
385
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
386
                "Key=arg4", NULL);
387
        goto out;
388
    }
389

390
    ret = dict_set_str(cmdline, "arg3", GD_SHD_PROCESS_NAME);
391
    if (ret) {
392
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
393
                "Key=arg3", NULL);
394
        goto out;
395
    }
396

397
    ret = dict_set_str(cmdline, "arg2", glusterd_uuid_option);
398
    if (ret) {
399
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
400
                "Key=arg2", NULL);
401
        goto out;
402
    }
403

404
    ret = dict_set_str(cmdline, "arg1", "--xlator-option");
405
    if (ret) {
406
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
407
                "Key=arg1", NULL);
408
        goto out;
409
    }
410

411
    ret = glusterd_svc_start(svc, flags, cmdline);
412
    if (ret) {
413
        gf_smsg(this->name, GF_LOG_ERROR, errno,
414
                GD_MSG_GLUSTER_SERVICE_START_FAIL, NULL);
415
        goto out;
416
    }
417

418
    ret = glusterd_conn_connect(&(svc->conn));
419
out:
420
    if (cmdline)
421
        dict_unref(cmdline);
422
    return ret;
423
}
424

425
int
426
glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo,
427
                                    glusterd_svc_t *svc, int flags)
428
{
429
    int ret = -1;
430
    glusterd_svc_proc_t *mux_proc = NULL;
431
    glusterd_conf_t *conf = NULL;
432

433
    conf = THIS->private;
434

435
    if (!conf || !volinfo || !svc)
436
        return -1;
437
    glusterd_shd_svcproc_cleanup(&volinfo->shd);
438
    mux_proc = glusterd_svcprocess_new();
439
    if (!mux_proc) {
440
        return -1;
441
    }
442
    ret = glusterd_shdsvc_init(volinfo, NULL, mux_proc);
443
    if (ret)
444
        return -1;
445
    pthread_mutex_lock(&conf->attach_lock);
446
    {
447
        cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
448
        svc->svc_proc = mux_proc;
449
        cds_list_del_init(&svc->mux_svc);
450
        cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
451
    }
452
    pthread_mutex_unlock(&conf->attach_lock);
453

454
    ret = glusterd_new_shd_svc_start(svc, flags);
455
    if (!ret) {
456
        volinfo->shd.attached = _gf_true;
457
    }
458
    return ret;
459
}
460

461
/*
 * Start (or attach) the self-heal daemon for the volume owning @svc.
 * If the muxed daemon already runs (shd->attached), this volume is
 * attached to it; otherwise a new glustershd process is spawned.
 * Returns 0 on success, -1 / non-zero on failure.
 */
int
glusterd_shdsvc_start(glusterd_svc_t *svc, int flags)
{
    int ret = -1;
    glusterd_shdsvc_t *shd = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_conf_t *conf = NULL;

    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
    conf = THIS->private;
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);

    /* Get volinfo->shd from svc object (svc is embedded in the shd struct) */
    shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
    if (!shd) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
               "Failed to get shd object "
               "from shd service");
        return -1;
    }

    /* Get volinfo from shd (shd is embedded in volinfo) */
    volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
    if (!volinfo) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
               "Failed to get volinfo from "
               "from shd");
        return -1;
    }

    /* Nothing to start for a volume that is not started. */
    if (volinfo->status != GLUSTERD_STATUS_STARTED)
        return -1;

    /* Base reference held for the duration of this call; released at out:. */
    glusterd_volinfo_ref(volinfo);

    if (!svc->inited) {
        ret = glusterd_shd_svc_mux_init(volinfo, svc);
        if (ret)
            goto out;
    }

    if (shd->attached) {
        /* Extra reference for the async attach path. */
        glusterd_volinfo_ref(volinfo);
        /* Unref will happen from glusterd_svc_attach_cbk */
        ret = glusterd_attach_svc(svc, volinfo, flags);
        if (ret) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
                   "Failed to attach shd svc(volume=%s) to pid=%d",
                   volinfo->volname, glusterd_proc_get_pid(&svc->proc));
            glusterd_shd_svcproc_cleanup(&volinfo->shd);
            glusterd_volinfo_unref(volinfo);
            /* NOTE(review): jumping to out1 skips the unref of the base
             * reference taken above — looks like a leaked volinfo ref on
             * this path; confirm intent before changing. */
            goto out1;
        }
        goto out;
    }
    /* No daemon yet: spawn a fresh glustershd for this volume. */
    ret = glusterd_new_shd_svc_start(svc, flags);
    if (!ret) {
        shd->attached = _gf_true;
    }
out:
    /* On failure, tear down the per-volume svc-process state. */
    if (ret && volinfo)
        glusterd_shd_svcproc_cleanup(&volinfo->shd);
    if (volinfo)
        glusterd_volinfo_unref(volinfo);
out1:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}
530

531
int
532
glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo)
533
{
534
    int ret = -1;
535
    xlator_t *this = THIS;
536
    gf_boolean_t identical = _gf_false;
537
    dict_t *mod_dict = NULL;
538
    glusterd_svc_t *svc = NULL;
539

540
    if (!volinfo) {
541
        /* reconfigure will be called separately*/
542
        ret = 0;
543
        goto out;
544
    }
545

546
    glusterd_volinfo_ref(volinfo);
547
    svc = &(volinfo->shd.svc);
548
    if (glusterd_svcs_shd_compatible_volumes_stopped(svc))
549
        goto manager;
550

551
    /*
552
     * Check both OLD and NEW volfiles, if they are SAME by size
553
     * and cksum i.e. "character-by-character". If YES, then
554
     * NOTHING has been changed, just return.
555
     */
556

557
    if (!glusterd_is_shd_compatible_volume(volinfo)) {
558
        if (svc->inited)
559
            goto manager;
560

561
        /* Nothing to do if not shd compatible */
562
        ret = 0;
563
        goto out;
564
    }
565
    mod_dict = dict_new();
566
    if (!mod_dict) {
567
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
568
        goto out;
569
    }
570

571
    ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0);
572
    if (ret) {
573
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
574
                "Key=cluster.background-self-heal-count", NULL);
575
        goto out;
576
    }
577

578
    ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on");
579
    if (ret) {
580
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
581
                "Key=cluster.data-self-heal", NULL);
582
        goto out;
583
    }
584

585
    ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on");
586
    if (ret) {
587
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
588
                "Key=cluster.metadata-self-heal", NULL);
589
        goto out;
590
    }
591

592
    ret = dict_set_int32(mod_dict, "graph-check", 1);
593
    if (ret) {
594
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
595
                "Key=graph-check", NULL);
596
        goto out;
597
    }
598

599
    ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on");
600
    if (ret) {
601
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
602
                "Key=cluster.entry-self-heal", NULL);
603
        goto out;
604
    }
605

606
    ret = glusterd_volume_svc_check_volfile_identical(
607
        "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
608
        &identical);
609
    if (ret)
610
        goto out;
611

612
    if (identical) {
613
        ret = 0;
614
        goto out;
615
    }
616

617
    /*
618
     * They are not identical. Find out if the topology is changed
619
     * OR just the volume options. If just the options which got
620
     * changed, then inform the xlator to reconfigure the options.
621
     */
622
    identical = _gf_false; /* RESET the FLAG */
623
    ret = glusterd_volume_svc_check_topology_identical(
624
        "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile,
625
        &identical);
626
    if (ret)
627
        goto out;
628

629
    /* Topology is not changed, but just the options. But write the
630
     * options to shd volfile, so that shd will be reconfigured.
631
     */
632
    if (identical) {
633
        ret = glusterd_shdsvc_create_volfile(volinfo);
634
        if (ret == 0) { /* Only if above PASSES */
635
            ret = glusterd_fetchspec_notify(THIS);
636
        }
637
        goto out;
638
    }
639
manager:
640
    /*
641
     * shd volfile's topology has been changed. volfile needs
642
     * to be RECONFIGURED to ACT on the changed volfile.
643
     */
644
    ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
645

646
out:
647
    if (volinfo)
648
        glusterd_volinfo_unref(volinfo);
649
    if (mod_dict)
650
        dict_unref(mod_dict);
651
    gf_msg_debug(this->name, 0, "Returning %d", ret);
652
    return ret;
653
}
654

655
int
656
glusterd_shdsvc_restart(void)
657
{
658
    glusterd_volinfo_t *volinfo = NULL;
659
    glusterd_volinfo_t *tmp = NULL;
660
    int ret = -1;
661
    xlator_t *this = THIS;
662
    glusterd_conf_t *conf = NULL;
663
    glusterd_svc_t *svc = NULL;
664

665
    conf = this->private;
666
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
667

668
    pthread_mutex_lock(&conf->volume_lock);
669
    cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list)
670
    {
671
        glusterd_volinfo_ref(volinfo);
672
        pthread_mutex_unlock(&conf->volume_lock);
673
        /* Start per volume shd svc */
674
        if (volinfo->status == GLUSTERD_STATUS_STARTED) {
675
            svc = &(volinfo->shd.svc);
676
            ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
677
            if (ret) {
678
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHD_START_FAIL,
679
                       "Couldn't start shd for "
680
                       "vol: %s on restart",
681
                       volinfo->volname);
682
                gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s",
683
                         volinfo->volname, svc->name);
684
                glusterd_volinfo_unref(volinfo);
685
                goto out;
686
            }
687
        }
688
        glusterd_volinfo_unref(volinfo);
689
        pthread_mutex_lock(&conf->volume_lock);
690
    }
691
    pthread_mutex_unlock(&conf->volume_lock);
692
out:
693
    return ret;
694
}
695

696
/*
 * Stop (or detach) the self-heal daemon for the volume owning @svc.
 * When this is the last volume attached to the muxed daemon process, the
 * process itself is stopped; otherwise only this volume is detached.
 * Returns 0 on success, -1 / non-zero on failure.
 */
int
glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig)
{
    int ret = -1;
    glusterd_svc_proc_t *svc_proc = NULL;
    glusterd_shdsvc_t *shd = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    gf_boolean_t empty = _gf_false;
    glusterd_conf_t *conf = NULL;
    int pid = -1;
    xlator_t *this = THIS;

    conf = this->private;
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
    svc_proc = svc->svc_proc;
    if (!svc_proc) {
        /*
         * This can happen when stop was called on a volume that is not shd
         * compatible.
         */
        gf_msg_debug("glusterd", 0, "svc_proc is null, ie shd already stopped");
        ret = 0;
        goto out;
    }

    /* Get volinfo->shd from svc object (svc is embedded in the shd struct) */
    shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
    if (!shd) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
               "Failed to get shd object "
               "from shd service");
        return -1;
    }

    /* Get volinfo from shd (shd is embedded in volinfo) */
    volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
    if (!volinfo) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
               "Failed to get volinfo from "
               "from shd");
        return -1;
    }

    /* Keep volinfo alive for the rest of this call. */
    glusterd_volinfo_ref(volinfo);
    pthread_mutex_lock(&conf->attach_lock);
    {
        /* pid is also an output: -1 means no running daemon process. */
        if (!gf_is_service_running(svc->proc.pidfile, &pid)) {
            gf_msg_debug(this->name, 0, "shd isn't running");
        }
        /* Detach this volume's svc from the muxed process; if it was the
         * last one, mark the whole process as stopping. */
        cds_list_del_init(&svc->mux_svc);
        empty = cds_list_empty(&svc_proc->svcs);
        if (empty) {
            svc_proc->status = GF_SVC_STOPPING;
            cds_list_del_init(&svc_proc->svc_proc_list);
        }
    }
    pthread_mutex_unlock(&conf->attach_lock);
    if (empty) {
        /* Last attached volume: stop the daemon process itself.
         * Unref will happen when destroying the connection */
        glusterd_volinfo_ref(volinfo);
        svc_proc->data = volinfo;
        ret = glusterd_svc_stop(svc, sig);
        if (ret) {
            glusterd_volinfo_unref(volinfo);
            goto out;
        }
    }
    if (!empty && pid != -1) {
        /* Daemon keeps running for other volumes: just detach this one. */
        ret = glusterd_detach_svc(svc, volinfo, sig);
        if (ret)
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
                   "shd service is failed to detach volume %s from pid %d",
                   volinfo->volname, glusterd_proc_get_pid(&svc->proc));
        else
            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_STOP_SUCCESS,
                   "Shd service is detached for volume %s from pid %d",
                   volinfo->volname, glusterd_proc_get_pid(&svc->proc));
    }
    svc->online = _gf_false;
    /* Drop the stale pidfile and per-volume svc-process state. */
    gf_unlink(svc->proc.pidfile);
    glusterd_shd_svcproc_cleanup(shd);
    ret = 0;
    glusterd_volinfo_unref(volinfo);
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}
784

/* Trailing web-page boilerplate (a Russian-language cookie-consent notice
 * from the hosting site) — not part of the original source file. */