glusterfs

Форк
0
/
glusterd-svc-helper.c 
1040 строк · 29.3 Кб
1
/*
2
   Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
3
   This file is part of GlusterFS.
4

5
   This file is licensed to you under your choice of the GNU Lesser
6
   General Public License, version 3 or any later version (LGPLv3 or
7
   later), or the GNU General Public License, version 2 (GPLv2), in all
8
   cases as published by the Free Software Foundation.
9
*/
10
#include <signal.h>
11

12
#include <glusterfs/globals.h>
13
#include <glusterfs/run.h>
14
#include "glusterd.h"
15
#include <glusterfs/glusterfs.h>
16
#include "glusterd-utils.h"
17
#include "glusterd-svc-mgmt.h"
18
#include "glusterd-shd-svc.h"
19
#include "glusterd-quotad-svc.h"
20
#ifdef BUILD_GNFS
21
#include "glusterd-nfs-svc.h"
22
#endif
23
#include "glusterd-bitd-svc.h"
24
#include "glusterd-shd-svc-helper.h"
25
#include "glusterd-scrub-svc.h"
26
#include "glusterd-svc-helper.h"
27
#include <glusterfs/syscall.h>
28
#include "glusterd-snapshot-utils.h"
29

30
int
31
glusterd_svcs_reconfigure(glusterd_volinfo_t *volinfo)
32
{
33
    int ret = 0;
34
    xlator_t *this = THIS;
35
    glusterd_conf_t *conf = NULL;
36
    char *svc_name = NULL;
37

38
    conf = this->private;
39
    GF_ASSERT(conf);
40

41
#ifdef BUILD_GNFS
42
    svc_name = "nfs";
43
    ret = glusterd_nfssvc_reconfigure();
44
    if (ret)
45
        goto out;
46
#endif
47
    svc_name = "self-heald";
48
    if (volinfo) {
49
        ret = glusterd_shdsvc_reconfigure(volinfo);
50
        if (ret)
51
            goto out;
52
    }
53

54
    if (conf->op_version == GD_OP_VERSION_MIN)
55
        goto out;
56

57
    svc_name = "quotad";
58
    ret = glusterd_quotadsvc_reconfigure();
59
    if (ret)
60
        goto out;
61

62
    svc_name = "bitd";
63
    ret = glusterd_bitdsvc_reconfigure();
64
    if (ret)
65
        goto out;
66

67
    svc_name = "scrubber";
68
    ret = glusterd_scrubsvc_reconfigure();
69
out:
70
    if (ret && svc_name)
71
        gf_event(EVENT_SVC_RECONFIGURE_FAILED, "svc_name=%s", svc_name);
72
    return ret;
73
}
74

75
int
76
glusterd_svcs_stop(glusterd_volinfo_t *volinfo)
77
{
78
    int ret = 0;
79
    glusterd_conf_t *priv = NULL;
80

81
    priv = THIS->private;
82
    GF_ASSERT(priv);
83

84
#ifdef BUILD_GNFS
85
    ret = priv->nfs_svc.stop(&(priv->nfs_svc), SIGKILL);
86
    if (ret)
87
        goto out;
88
#endif
89
    ret = priv->quotad_svc.stop(&(priv->quotad_svc), SIGTERM);
90
    if (ret)
91
        goto out;
92

93
    if (volinfo) {
94
        ret = volinfo->shd.svc.stop(&(volinfo->shd.svc), SIGTERM);
95
        if (ret)
96
            goto out;
97
    }
98

99
    ret = priv->bitd_svc.stop(&(priv->bitd_svc), SIGTERM);
100
    if (ret)
101
        goto out;
102

103
    ret = priv->scrub_svc.stop(&(priv->scrub_svc), SIGTERM);
104

105
out:
106
    return ret;
107
}
108

109
int
110
glusterd_svcs_manager(glusterd_volinfo_t *volinfo)
111
{
112
    int ret = 0;
113
    glusterd_conf_t *conf = NULL;
114

115
    conf = THIS->private;
116
    GF_ASSERT(conf);
117

118
    if (volinfo && volinfo->is_snap_volume)
119
        return 0;
120

121
#if BUILD_GNFS
122
    ret = conf->nfs_svc.manager(&(conf->nfs_svc), NULL, PROC_START_NO_WAIT);
123
    if (ret)
124
        goto out;
125
#endif
126
    if (conf->op_version == GD_OP_VERSION_MIN)
127
        goto out;
128

129
    ret = conf->quotad_svc.manager(&(conf->quotad_svc), volinfo,
130
                                   PROC_START_NO_WAIT);
131
    if (ret == -EINVAL)
132
        ret = 0;
133
    if (ret)
134
        goto out;
135

136
    ret = conf->bitd_svc.manager(&(conf->bitd_svc), NULL, PROC_START_NO_WAIT);
137
    if (ret == -EINVAL)
138
        ret = 0;
139
    if (ret)
140
        goto out;
141

142
    if (volinfo) {
143
        ret = volinfo->shd.svc.manager(&(volinfo->shd.svc), volinfo,
144
                                       PROC_START_NO_WAIT);
145
        if (ret == -EINVAL)
146
            ret = 0;
147
        if (ret)
148
            goto out;
149
    }
150

151
    ret = conf->scrub_svc.manager(&(conf->scrub_svc), NULL, PROC_START_NO_WAIT);
152
    if (ret == -EINVAL)
153
        ret = 0;
154
out:
155
    return ret;
156
}
157

158
int
159
glusterd_svc_check_volfile_identical(char *svc_name,
160
                                     glusterd_graph_builder_t builder,
161
                                     gf_boolean_t *identical)
162
{
163
    char orgvol[PATH_MAX] = {
164
        0,
165
    };
166
    char *tmpvol = NULL;
167
    glusterd_conf_t *conf = NULL;
168
    xlator_t *this = THIS;
169
    int ret = -1;
170
    int need_unlink = 0;
171
    int tmp_fd = -1;
172

173
    GF_ASSERT(identical);
174
    conf = this->private;
175

176
    glusterd_svc_build_volfile_path(svc_name, conf->workdir, orgvol,
177
                                    sizeof(orgvol));
178

179
    ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
180
    if (ret < 0) {
181
        goto out;
182
    }
183

184
    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
185
    tmp_fd = mkstemp(tmpvol);
186
    if (tmp_fd < 0) {
187
        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
188
               "Unable to create temp file"
189
               " %s:(%s)",
190
               tmpvol, strerror(errno));
191
        ret = -1;
192
        goto out;
193
    }
194

195
    need_unlink = 1;
196

197
    ret = glusterd_create_global_volfile(builder, tmpvol, NULL);
198
    if (ret)
199
        goto out;
200

201
    ret = glusterd_check_files_identical(orgvol, tmpvol, identical);
202
out:
203
    if (need_unlink)
204
        sys_unlink(tmpvol);
205

206
    if (tmpvol != NULL)
207
        GF_FREE(tmpvol);
208

209
    if (tmp_fd >= 0)
210
        sys_close(tmp_fd);
211

212
    return ret;
213
}
214

215
int
216
glusterd_svc_check_topology_identical(char *svc_name,
217
                                      glusterd_graph_builder_t builder,
218
                                      gf_boolean_t *identical)
219
{
220
    char orgvol[PATH_MAX] = {
221
        0,
222
    };
223
    char *tmpvol = NULL;
224
    glusterd_conf_t *conf = NULL;
225
    xlator_t *this = THIS;
226
    int ret = -1;
227
    int tmpclean = 0;
228
    int tmpfd = -1;
229

230
    if ((!identical) || (!this->private)) {
231
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
232
        goto out;
233
    }
234

235
    conf = this->private;
236
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
237

238
    /* Fetch the original volfile */
239
    glusterd_svc_build_volfile_path(svc_name, conf->workdir, orgvol,
240
                                    sizeof(orgvol));
241

242
    /* Create the temporary volfile */
243
    ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
244
    if (ret < 0) {
245
        goto out;
246
    }
247

248
    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
249
    tmpfd = mkstemp(tmpvol);
250
    if (tmpfd < 0) {
251
        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
252
               "Unable to create temp file"
253
               " %s:(%s)",
254
               tmpvol, strerror(errno));
255
        ret = -1;
256
        goto out;
257
    }
258

259
    tmpclean = 1; /* SET the flag to unlink() tmpfile */
260

261
    ret = glusterd_create_global_volfile(builder, tmpvol, NULL);
262
    if (ret)
263
        goto out;
264

265
    /* Compare the topology of volfiles */
266
    ret = glusterd_check_topology_identical(orgvol, tmpvol, identical);
267
out:
268
    if (tmpfd >= 0)
269
        sys_close(tmpfd);
270
    if (tmpclean)
271
        sys_unlink(tmpvol);
272
    if (tmpvol != NULL)
273
        GF_FREE(tmpvol);
274
    return ret;
275
}
276

277
int
278
glusterd_volume_svc_check_volfile_identical(
279
    char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
280
    glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
281
{
282
    char orgvol[PATH_MAX] = {
283
        0,
284
    };
285
    char *tmpvol = NULL;
286
    xlator_t *this = THIS;
287
    int ret = -1;
288
    int need_unlink = 0;
289
    int tmp_fd = -1;
290

291
    GF_VALIDATE_OR_GOTO(this->name, identical, out);
292

293
    /* This builds volfile for volume level dameons */
294
    glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
295
                                           sizeof(orgvol));
296

297
    ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
298
    if (ret < 0) {
299
        goto out;
300
    }
301

302
    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
303
    tmp_fd = mkstemp(tmpvol);
304
    if (tmp_fd < 0) {
305
        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
306
               "Unable to create temp file"
307
               " %s:(%s)",
308
               tmpvol, strerror(errno));
309
        ret = -1;
310
        goto out;
311
    }
312

313
    need_unlink = 1;
314

315
    ret = builder(volinfo, tmpvol, mode_dict);
316
    if (ret)
317
        goto out;
318

319
    ret = glusterd_check_files_identical(orgvol, tmpvol, identical);
320
out:
321
    if (need_unlink)
322
        sys_unlink(tmpvol);
323

324
    if (tmpvol != NULL)
325
        GF_FREE(tmpvol);
326

327
    if (tmp_fd >= 0)
328
        sys_close(tmp_fd);
329

330
    return ret;
331
}
332

333
int
334
glusterd_volume_svc_check_topology_identical(
335
    char *svc_name, dict_t *mode_dict, glusterd_volinfo_t *volinfo,
336
    glusterd_vol_graph_builder_t builder, gf_boolean_t *identical)
337
{
338
    char orgvol[PATH_MAX] = {
339
        0,
340
    };
341
    char *tmpvol = NULL;
342
    glusterd_conf_t *conf = NULL;
343
    xlator_t *this = THIS;
344
    int ret = -1;
345
    int tmpclean = 0;
346
    int tmpfd = -1;
347

348
    if ((!identical) || (!this->private)) {
349
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
350
        goto out;
351
    }
352

353
    conf = this->private;
354
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
355

356
    /* This builds volfile for volume level dameons */
357
    glusterd_volume_svc_build_volfile_path(svc_name, volinfo, orgvol,
358
                                           sizeof(orgvol));
359
    /* Create the temporary volfile */
360
    ret = gf_asprintf(&tmpvol, "/tmp/g%s-XXXXXX", svc_name);
361
    if (ret < 0) {
362
        goto out;
363
    }
364

365
    /* coverity[SECURE_TEMP] mkstemp uses 0600 as the mode and is safe */
366
    tmpfd = mkstemp(tmpvol);
367
    if (tmpfd < 0) {
368
        gf_msg(this->name, GF_LOG_WARNING, errno, GD_MSG_FILE_OP_FAILED,
369
               "Unable to create temp file"
370
               " %s:(%s)",
371
               tmpvol, strerror(errno));
372
        ret = -1;
373
        goto out;
374
    }
375

376
    tmpclean = 1; /* SET the flag to unlink() tmpfile */
377

378
    ret = builder(volinfo, tmpvol, mode_dict);
379
    if (ret)
380
        goto out;
381

382
    /* Compare the topology of volfiles */
383
    ret = glusterd_check_topology_identical(orgvol, tmpvol, identical);
384
out:
385
    if (tmpfd >= 0)
386
        sys_close(tmpfd);
387
    if (tmpclean)
388
        sys_unlink(tmpvol);
389
    if (tmpvol != NULL)
390
        GF_FREE(tmpvol);
391
    return ret;
392
}
393

394
gf_boolean_t
395
glusterd_is_svcproc_attachable(glusterd_svc_proc_t *svc_proc)
396
{
397
    int pid = -1;
398
    glusterd_svc_t *parent_svc = NULL;
399

400
    if (!svc_proc)
401
        return _gf_false;
402

403
    if (svc_proc->status == GF_SVC_STARTING)
404
        return _gf_true;
405

406
    if (svc_proc->status == GF_SVC_STARTED ||
407
        svc_proc->status == GF_SVC_DISCONNECTED) {
408
        parent_svc = cds_list_entry(svc_proc->svcs.next, glusterd_svc_t,
409
                                    mux_svc);
410
        if (parent_svc && gf_is_service_running(parent_svc->proc.pidfile, &pid))
411
            return _gf_true;
412
    }
413

414
    if (svc_proc->status == GF_SVC_DIED || svc_proc->status == GF_SVC_STOPPING)
415
        return _gf_false;
416

417
    return _gf_false;
418
}
419

420
void *
421
__gf_find_compatible_svc(gd_node_type daemon)
422
{
423
    glusterd_svc_proc_t *svc_proc = NULL;
424
    struct cds_list_head *svc_procs = NULL;
425
    glusterd_conf_t *conf = NULL;
426

427
    conf = THIS->private;
428
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
429

430
    switch (daemon) {
431
        case GD_NODE_SHD: {
432
            svc_procs = &conf->shd_procs;
433
            if (!svc_procs)
434
                goto out;
435
        } break;
436
        default:
437
            /* Add support for other client daemons here */
438
            goto out;
439
    }
440

441
    cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
442
    {
443
        if (glusterd_is_svcproc_attachable(svc_proc))
444
            return (void *)svc_proc;
445
        /*
446
         * Logic to select one process goes here. Currently there is only one
447
         * shd_proc. So selecting the first one;
448
         */
449
    }
450
out:
451
    return NULL;
452
}
453

454
glusterd_svc_proc_t *
455
glusterd_svcprocess_new(void)
456
{
457
    glusterd_svc_proc_t *new_svcprocess = NULL;
458

459
    new_svcprocess = GF_CALLOC(1, sizeof(*new_svcprocess),
460
                               gf_gld_mt_glusterd_svc_proc_t);
461

462
    if (!new_svcprocess)
463
        return NULL;
464

465
    CDS_INIT_LIST_HEAD(&new_svcprocess->svc_proc_list);
466
    CDS_INIT_LIST_HEAD(&new_svcprocess->svcs);
467
    new_svcprocess->notify = glusterd_muxsvc_common_rpc_notify;
468
    new_svcprocess->status = GF_SVC_STARTING;
469
    return new_svcprocess;
470
}
471

472
/* Initialise the self-heal daemon svc for 'volinfo' so it can be multiplexed
 * into a shared shd process: find a running/attachable shd process (by pid
 * from the pidfile, or any compatible one), or create a brand new process
 * descriptor, link the svc into it, and run glusterd_shdsvc_init().
 * Serialised by conf->attach_lock.  Returns 0 on success, -1 on failure. */
int
glusterd_shd_svc_mux_init(glusterd_volinfo_t *volinfo, glusterd_svc_t *svc)
{
    int ret = -1;
    glusterd_svc_proc_t *mux_proc = NULL;
    glusterd_conn_t *mux_conn = NULL;
    glusterd_conf_t *conf = NULL;
    glusterd_svc_t *parent_svc = NULL;
    int pid = -1;
    /* Set when a stale pidfile is found; the daemon is stopped and the
     * pidfile unlinked only after attach_lock is dropped (see 'out'). */
    gf_boolean_t stop_daemon = _gf_false;
    char pidfile[PATH_MAX] = {
        0,
    };

    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
    GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);
    conf = THIS->private;
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
    GF_VALIDATE_OR_GOTO("glusterd", svc, out);

    pthread_mutex_lock(&conf->attach_lock);
    {
        if (svc->inited && !glusterd_proc_is_running(&(svc->proc))) {
            /* This is the case when shd process was abnormally killed */
            /* NOTE: the lock is deliberately dropped around the cleanup
             * call and re-acquired afterwards — cleanup must not run under
             * attach_lock. */
            pthread_mutex_unlock(&conf->attach_lock);
            glusterd_shd_svcproc_cleanup(&volinfo->shd);
            pthread_mutex_lock(&conf->attach_lock);
        }

        if (!svc->inited) {
            glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile));
            ret = snprintf(svc->proc.name, sizeof(svc->proc.name), "%s",
                           "glustershd");
            if (ret < 0)
                goto unlock;

            ret = snprintf(svc->proc.pidfile, sizeof(svc->proc.pidfile), "%s",
                           pidfile);
            if (ret < 0)
                goto unlock;

            if (gf_is_service_running(pidfile, &pid)) {
                /* Just connect is required, but we don't know what happens
                 * during the disconnect. So better to reattach.
                 */
                mux_proc = __gf_find_compatible_svc_from_pid(GD_NODE_SHD, pid);
            }

            if (!mux_proc) {
                if (pid != -1 && sys_access(pidfile, R_OK) == 0) {
                    /* stale pid file, stop and unlink it. This has to be
                     * done outside the attach_lock.
                     */
                    stop_daemon = _gf_true;
                }
                mux_proc = __gf_find_compatible_svc(GD_NODE_SHD);
            }
            if (mux_proc) {
                /* Take first entry from the process */
                parent_svc = cds_list_entry(mux_proc->svcs.next, glusterd_svc_t,
                                            mux_svc);
                mux_conn = &parent_svc->conn;
                if (volinfo)
                    volinfo->shd.attached = _gf_true;
            } else {
                /* No attachable process: start a fresh descriptor and track
                 * it in the global shd process list. */
                mux_proc = glusterd_svcprocess_new();
                if (!mux_proc) {
                    ret = -1;
                    goto unlock;
                }
                cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs);
            }
            /* Link this svc into the (new or found) mux process. */
            svc->svc_proc = mux_proc;
            cds_list_del_init(&svc->mux_svc);
            cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs);
            ret = glusterd_shdsvc_init(volinfo, mux_conn, mux_proc);
            if (ret) {
                /* Unlock here and jump past the 'unlock' label to avoid a
                 * double unlock. */
                pthread_mutex_unlock(&conf->attach_lock);
                gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC,
                       "Failed to init shd "
                       "service");
                goto out;
            }
            gf_msg_debug(THIS->name, 0, "shd service initialized");
            svc->inited = _gf_true;
        }
        ret = 0;
    }
unlock:
    pthread_mutex_unlock(&conf->attach_lock);
out:
    if (stop_daemon) {
        /* Deferred stale-daemon teardown, performed lock-free as promised. */
        glusterd_proc_stop(&svc->proc, SIGTERM, PROC_STOP_FORCE);
        gf_unlink(pidfile);
    }
    return ret;
}
569

570
void *
571
__gf_find_compatible_svc_from_pid(gd_node_type daemon, pid_t pid)
572
{
573
    glusterd_svc_proc_t *svc_proc = NULL;
574
    struct cds_list_head *svc_procs = NULL;
575
    glusterd_svc_t *svc = NULL;
576
    pid_t mux_pid = -1;
577
    glusterd_conf_t *conf = NULL;
578

579
    conf = THIS->private;
580
    if (!conf)
581
        return NULL;
582

583
    switch (daemon) {
584
        case GD_NODE_SHD: {
585
            svc_procs = &conf->shd_procs;
586
            if (!svc_procs)
587
                return NULL;
588
        } break;
589
        default:
590
            /* Add support for other client daemons here */
591
            return NULL;
592
    }
593

594
    cds_list_for_each_entry(svc_proc, svc_procs, svc_proc_list)
595
    {
596
        cds_list_for_each_entry(svc, &svc_proc->svcs, mux_svc)
597
        {
598
            if (gf_is_service_running(svc->proc.pidfile, &mux_pid)) {
599
                if (mux_pid == pid &&
600
                    glusterd_is_svcproc_attachable(svc_proc)) {
601
                    /*TODO
602
                     * inefficient loop, but at the moment, there is only
603
                     * one shd.
604
                     */
605
                    return svc_proc;
606
                }
607
            }
608
        }
609
    }
610
    return NULL;
611
}
612

613
static int32_t
614
my_callback(struct rpc_req *req, struct iovec *iov, int count, void *v_frame)
615
{
616
    call_frame_t *frame = v_frame;
617
    xlator_t *this = NULL;
618
    glusterd_conf_t *conf = NULL;
619

620
    GF_VALIDATE_OR_GOTO("glusterd", frame, out);
621
    this = frame->this;
622
    GF_VALIDATE_OR_GOTO("glusterd", this, out);
623
    conf = this->private;
624
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
625

626
    if (GF_ATOMIC_DEC(conf->blockers) == 0) {
627
        synccond_broadcast(&conf->cond_blockers);
628
    }
629

630
    STACK_DESTROY(frame->root);
631
out:
632
    return 0;
633
}
634

635
/* RPC completion callback for GLUSTERD_SVC_ATTACH: decode the getspec-style
 * reply, mark the svc online on success, and on failure tear down the shd
 * svcproc linkage.  Always releases the blocker taken at submit time and
 * destroys the frame. */
static int32_t
glusterd_svc_attach_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *v_frame)
{
    call_frame_t *frame = v_frame;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_shdsvc_t *shd = NULL;
    /* cookie/local were stashed by __glusterd_send_svc_configure_req */
    glusterd_svc_t *svc = frame->cookie;
    glusterd_conf_t *conf = NULL;
    int *flag = (int *)frame->local;
    xlator_t *this = THIS;
    int ret = -1;
    gf_getspec_rsp rsp = {
        0,
    };

    conf = this->private;
    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
    GF_VALIDATE_OR_GOTO("glusterd", frame, out);
    GF_VALIDATE_OR_GOTO("glusterd", svc, out);

    /* Take ownership of local/cookie so STACK_DESTROY doesn't touch them. */
    frame->local = NULL;
    frame->cookie = NULL;

    if (!strcmp(svc->name, "glustershd")) {
        /* Get volinfo->shd from svc object */
        shd = cds_list_entry(svc, glusterd_shdsvc_t, svc);
        if (!shd) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL,
                   "Failed to get shd object "
                   "from shd service");
            goto out;
        }

        /* Get volinfo from shd */
        volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd);
        if (!volinfo) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
                   "Failed to get volinfo from "
                   "from shd");
            goto out;
        }
    }

    if (!iov) {
        gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               "iov is NULL");
        ret = -1;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
    if (ret < 0) {
        gf_msg(frame->this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               "XDR decoding error");
        ret = -1;
        goto out;
    }

    /* NOTE(review): both branches below dereference volinfo->volname, but
     * volinfo is only set when svc->name == "glustershd" — confirm no other
     * svc type can reach this callback, otherwise this is a NULL deref.
     * Also note the success log reuses the GD_MSG_SVC_ATTACH_FAIL id. */
    if (rsp.op_ret == 0) {
        svc->online = _gf_true;
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
               "svc %s of volume %s attached successfully to pid %d", svc->name,
               volinfo->volname, glusterd_proc_get_pid(&svc->proc));
    } else {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
               "svc %s of volume %s failed to attach to pid %d", svc->name,
               volinfo->volname, glusterd_proc_get_pid(&svc->proc));
        if (!strcmp(svc->name, "glustershd")) {
            glusterd_shd_svcproc_cleanup(&volinfo->shd);
        }
    }
out:
    if (flag) {
        GF_FREE(flag);
    }

    /* NOTE(review): assumes the attach submitter holds a volinfo ref that
     * this callback is responsible for releasing — confirm against caller. */
    if (volinfo)
        glusterd_volinfo_unref(volinfo);

    if (GF_ATOMIC_DEC(conf->blockers) == 0) {
        synccond_broadcast(&conf->cond_blockers);
    }
    STACK_DESTROY(frame->root);
    return 0;
}
721

722
extern size_t
723
build_volfile_path(char *volume_id, char *path, size_t path_len,
724
                   char *trusted_str, dict_t *dict);
725

726
int
727
__glusterd_send_svc_configure_req(glusterd_svc_t *svc, int flags,
728
                                  struct rpc_clnt *rpc, char *volfile_id,
729
                                  int op)
730
{
731
    int ret = -1;
732
    struct iobuf *iobuf = NULL;
733
    struct iobref *iobref = NULL;
734
    struct iovec iov = {
735
        0,
736
    };
737
    char path[PATH_MAX] = {
738
        '\0',
739
    };
740
    struct stat stbuf = {
741
        0,
742
    };
743
    int32_t spec_fd = -1;
744
    size_t file_len = -1;
745
    char *volfile_content = NULL;
746
    ssize_t req_size = 0;
747
    call_frame_t *frame = NULL;
748
    gd1_mgmt_brick_op_req brick_req;
749
    dict_t *dict = NULL;
750
    void *req = &brick_req;
751
    struct rpc_clnt_connection *conn;
752
    xlator_t *this = THIS;
753
    glusterd_conf_t *conf = THIS->private;
754
    extern struct rpc_clnt_program gd_brick_prog;
755
    fop_cbk_fn_t cbkfn = my_callback;
756

757
    if (!rpc) {
758
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PARAM_NULL,
759
               "called with null rpc");
760
        return -1;
761
    }
762

763
    conn = &rpc->conn;
764
    if (rpc_clnt_connection_status(conn) != RPC_STATUS_CONNECTED) {
765
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
766
               "not connected yet");
767
        return -1;
768
    }
769

770
    brick_req.op = op;
771
    brick_req.name = volfile_id;
772
    brick_req.input.input_val = NULL;
773
    brick_req.input.input_len = 0;
774
    brick_req.dict.dict_val = NULL;
775
    brick_req.dict.dict_len = 0;
776

777
    frame = create_frame(this, this->ctx->pool);
778
    if (!frame) {
779
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_FRAME_CREATE_FAIL,
780
                NULL);
781
        goto err;
782
    }
783

784
    if (op == GLUSTERD_SVC_ATTACH) {
785
        dict = dict_new();
786
        if (!dict) {
787
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
788
                    NULL);
789
            ret = -ENOMEM;
790
            goto err;
791
        }
792

793
        (void)build_volfile_path(volfile_id, path, sizeof(path), NULL, dict);
794

795
        ret = sys_stat(path, &stbuf);
796
        if (ret < 0) {
797
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
798
                   "Unable to stat %s (%s)", path, strerror(errno));
799
            ret = -EINVAL;
800
            goto err;
801
        }
802

803
        file_len = stbuf.st_size;
804
        volfile_content = GF_MALLOC(file_len + 1, gf_common_mt_char);
805
        if (!volfile_content) {
806
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
807
            ret = -ENOMEM;
808
            goto err;
809
        }
810
        spec_fd = open(path, O_RDONLY);
811
        if (spec_fd < 0) {
812
            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
813
                   "failed to read volfile %s", path);
814
            ret = -EIO;
815
            goto err;
816
        }
817
        ret = sys_read(spec_fd, volfile_content, file_len);
818
        if (ret == file_len) {
819
            brick_req.input.input_val = volfile_content;
820
            brick_req.input.input_len = file_len;
821
        } else {
822
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_ATTACH_FAIL,
823
                   "read failed on path %s. File size=%" GF_PRI_SIZET
824
                   "read size=%d",
825
                   path, file_len, ret);
826
            ret = -EIO;
827
            goto err;
828
        }
829
        if (dict->count > 0) {
830
            ret = dict_allocate_and_serialize(dict, &brick_req.dict.dict_val,
831
                                              &brick_req.dict.dict_len);
832
            if (ret) {
833
                gf_smsg(this->name, GF_LOG_ERROR, errno,
834
                        GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
835
                goto err;
836
            }
837
        }
838

839
        frame->cookie = svc;
840
        frame->local = GF_CALLOC(1, sizeof(int), gf_gld_mt_int);
841
        *((int *)frame->local) = flags;
842
        cbkfn = glusterd_svc_attach_cbk;
843
    }
844

845
    req_size = xdr_sizeof((xdrproc_t)xdr_gd1_mgmt_brick_op_req, req);
846
    iobuf = iobuf_get2(rpc->ctx->iobuf_pool, req_size);
847
    if (!iobuf) {
848
        goto err;
849
    }
850

851
    iov.iov_base = iobuf->ptr;
852
    iov.iov_len = iobuf_pagesize(iobuf);
853

854
    iobref = iobref_new();
855
    if (!iobref) {
856
        goto err;
857
    }
858

859
    iobref_add(iobref, iobuf);
860

861
    /* Create the xdr payload */
862
    ret = xdr_serialize_generic(iov, req, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
863
    if (ret == -1) {
864
        goto err;
865
    }
866
    iov.iov_len = ret;
867

868
    /* Send the msg */
869
    GF_ATOMIC_INC(conf->blockers);
870
    ret = rpc_clnt_submit(rpc, &gd_brick_prog, op, cbkfn, &iov, 1, NULL, 0,
871
                          iobref, frame, NULL, 0, NULL, 0, NULL);
872

873
    frame = NULL;
874
err:
875
    if (iobuf) {
876
        iobuf_unref(iobuf);
877
    }
878
    if (iobref) {
879
        iobref_unref(iobref);
880
    }
881
    if (dict)
882
        dict_unref(dict);
883
    if (brick_req.dict.dict_val)
884
        GF_FREE(brick_req.dict.dict_val);
885

886
    GF_FREE(volfile_content);
887
    if (spec_fd >= 0)
888
        sys_close(spec_fd);
889
    if (frame && ret)
890
        STACK_DESTROY(frame->root);
891
    return ret;
892
}
893

894
static gf_boolean_t
895
glusterd_volume_exists(const char *volname)
896
{
897
    glusterd_volinfo_t *tmp_volinfo = NULL;
898
    gf_boolean_t volume_found = _gf_false;
899
    xlator_t *this = THIS;
900
    glusterd_conf_t *priv = NULL;
901

902
    GF_ASSERT(volname);
903

904
    priv = this->private;
905
    GF_ASSERT(priv);
906

907
    cds_list_for_each_entry(tmp_volinfo, &priv->volumes, vol_list)
908
    {
909
        if (!strcmp(tmp_volinfo->volname, volname)) {
910
            gf_msg_debug(this->name, 0, "Volume %s found", volname);
911
            volume_found = _gf_true;
912
            break;
913
        }
914
    }
915

916
    return volume_found;
917
}
918

919
/* Attach 'svc' (for 'volinfo') to an already-running multiplexed daemon
 * process, retrying up to 15 times one second apart.  Bails out early (with
 * success) if the volume disappears mid-retry.  Returns 0 on success or when
 * the volume went stale, -1 after exhausting the retries. */
int
glusterd_attach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int flags)
{
    glusterd_conf_t *conf = THIS->private;
    int ret = -1;
    int tries;
    rpc_clnt_t *rpc = NULL;

    GF_VALIDATE_OR_GOTO("glusterd", conf, out);
    GF_VALIDATE_OR_GOTO("glusterd", svc, out);
    GF_VALIDATE_OR_GOTO("glusterd", volinfo, out);

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_ATTACH_INFO,
           "adding svc %s (volume=%s) to existing "
           "process with pid %d",
           svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));

    /* Hold a ref for the whole retry loop so the rpc object can't vanish. */
    rpc = rpc_clnt_ref(svc->conn.rpc);
    for (tries = 15; tries > 0; --tries) {
        /* There might be a case that the volume for which we're attempting to
         * attach a shd svc might become stale and in the process of deletion.
         * Given that the volinfo object is being already passed here before
         * that sequence of operation has happened we might be operating on a
         * stale volume. At every sync task switch we should check for existance
         * of the volume now
         */
        if (!glusterd_volume_exists(volinfo->volname)) {
            gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_ATTACH_FAIL,
                   "Volume %s "
                   " is marked as stale, not attempting further shd svc attach "
                   "attempts",
                   volinfo->volname);
            ret = 0;
            goto out;
        }
        if (rpc) {
            /* The send itself is serialised under attach_lock. */
            pthread_mutex_lock(&conf->attach_lock);
            {
                ret = __glusterd_send_svc_configure_req(
                    svc, flags, rpc, svc->proc.volfileid, GLUSTERD_SVC_ATTACH);
            }
            pthread_mutex_unlock(&conf->attach_lock);
            if (!ret) {
                volinfo->shd.attached = _gf_true;
                goto out;
            }
        }
        /*
         * It might not actually be safe to manipulate the lock
         * like this, but if we don't then the connection can
         * never actually complete and retries are useless.
         * Unfortunately, all of the alternatives (e.g. doing
         * all of this in a separate thread) are much more
         * complicated and risky.
         * TBD: see if there's a better way
         */
        synclock_unlock(&conf->big_lock);
        synctask_sleep(1);
        synclock_lock(&conf->big_lock);
    }
    ret = -1;
    gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_ATTACH_FAIL,
           "attach failed for %s(volume=%s)", svc->name, volinfo->volname);
out:
    if (rpc)
        rpc_clnt_unref(rpc);
    return ret;
}
987

988
int
989
glusterd_detach_svc(glusterd_svc_t *svc, glusterd_volinfo_t *volinfo, int sig)
990
{
991
    glusterd_conf_t *conf = THIS->private;
992
    int ret = -1;
993
    int tries;
994
    rpc_clnt_t *rpc = NULL;
995

996
    GF_VALIDATE_OR_GOTO(THIS->name, conf, out);
997
    GF_VALIDATE_OR_GOTO(THIS->name, svc, out);
998
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
999

1000
    gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DETACH_INFO,
1001
           "removing svc %s (volume=%s) from existing "
1002
           "process with pid %d",
1003
           svc->name, volinfo->volname, glusterd_proc_get_pid(&svc->proc));
1004

1005
    rpc = rpc_clnt_ref(svc->conn.rpc);
1006
    for (tries = 15; tries > 0; --tries) {
1007
        if (rpc) {
1008
            /*For detach there is no flags, and we are not using sig.*/
1009
            pthread_mutex_lock(&conf->attach_lock);
1010
            {
1011
                ret = __glusterd_send_svc_configure_req(svc, 0, svc->conn.rpc,
1012
                                                        svc->proc.volfileid,
1013
                                                        GLUSTERD_SVC_DETACH);
1014
            }
1015
            pthread_mutex_unlock(&conf->attach_lock);
1016
            if (!ret) {
1017
                goto out;
1018
            }
1019
        }
1020
        /*
1021
         * It might not actually be safe to manipulate the lock
1022
         * like this, but if we don't then the connection can
1023
         * never actually complete and retries are useless.
1024
         * Unfortunately, all of the alternatives (e.g. doing
1025
         * all of this in a separate thread) are much more
1026
         * complicated and risky.
1027
         * TBD: see if there's a better way
1028
         */
1029
        synclock_unlock(&conf->big_lock);
1030
        synctask_sleep(1);
1031
        synclock_lock(&conf->big_lock);
1032
    }
1033
    ret = -1;
1034
    gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_SVC_DETACH_FAIL,
1035
           "detach failed for %s(volume=%s)", svc->name, volinfo->volname);
1036
out:
1037
    if (rpc)
1038
        rpc_clnt_unref(rpc);
1039
    return ret;
1040
}
1041

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.