cubefs
189 lines · 5.7 KB
// Copyright 2022 The CubeFS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package clustermgr

import (
	"context"
	"errors"
	"fmt"
	"net/url"

	"github.com/cubefs/cubefs/blobstore/api/blobnode"
	"github.com/cubefs/cubefs/blobstore/common/proto"
	"github.com/cubefs/cubefs/blobstore/common/rpc"
)
26
// DiskInfoArgs carries a single disk ID; it is the request body for
// disk-targeted calls such as /disk/drop and /disk/dropped.
type DiskInfoArgs struct {
	DiskID proto.DiskID `json:"disk_id"`
}
30
// DiskIDAllocRet is the response body of /diskid/alloc: the newly
// allocated disk ID.
type DiskIDAllocRet struct {
	DiskID proto.DiskID `json:"disk_id"`
}
34
// DiskSetArgs is the request body of /disk/set: update the status of
// one disk.
type DiskSetArgs struct {
	DiskID proto.DiskID    `json:"disk_id"`
	Status proto.DiskStatus `json:"status"`
}
39
// ListOptionArgs are the filter and paging options for /disk/list.
// Zero-valued fields are omitted from the JSON body; the server applies
// its own defaults (see ListDisk).
type ListOptionArgs struct {
	Idc    string           `json:"idc,omitempty"`
	Rack   string           `json:"rack,omitempty"`
	Host   string           `json:"host,omitempty"`
	Status proto.DiskStatus `json:"status,omitempty"`
	// Marker: list disk info after this disk ID (exclusive cursor).
	Marker proto.DiskID `json:"marker,omitempty"`
	// Count: number of disk infos per page.
	Count int `json:"count,omitempty"`
}
50
// ListDiskRet is one page of disk infos plus the marker to pass back
// for the next page. Returned by /disk/list and /disk/droppinglist.
type ListDiskRet struct {
	Disks  []*blobnode.DiskInfo `json:"disks"`
	Marker proto.DiskID         `json:"marker"`
}
55
// DisksHeartbeatArgs is the request body of /disk/heartbeat: the latest
// capacity info of every disk on a blobnode.
type DisksHeartbeatArgs struct {
	Disks []*blobnode.DiskHeartBeatInfo `json:"disks"`
}
59
// DisksHeartbeatRet is the response body of /disk/heartbeat: one result
// entry per reported disk.
type DisksHeartbeatRet struct {
	Disks []*DiskHeartbeatRet `json:"disks"`
}
63
// DiskHeartbeatRet is the per-disk heartbeat result: the authoritative
// status and read-only flag the cluster manager holds for the disk.
type DiskHeartbeatRet struct {
	DiskID   proto.DiskID    `json:"disk_id"`
	Status   proto.DiskStatus `json:"status"`
	ReadOnly bool            `json:"read_only"`
}
69
// DiskStatInfo aggregates per-IDC disk counters.
// NOTE(review): not referenced elsewhere in this chunk — presumably
// returned by a cluster-manager stat endpoint; confirm against callers.
type DiskStatInfo struct {
	IDC            string `json:"idc"`
	Total          int    `json:"total"`
	TotalChunk     int64  `json:"total_chunk"`
	TotalFreeChunk int64  `json:"total_free_chunk"`
	Available      int    `json:"available"`
	Readonly       int    `json:"readonly"`
	Expired        int    `json:"expired"`
	Broken         int    `json:"broken"`
	Repairing      int    `json:"repairing"`
	Repaired       int    `json:"repaired"`
	Dropping       int    `json:"dropping"`
	Dropped        int    `json:"dropped"`
}
84
// SpaceStatInfo aggregates cluster-wide space and node counters, with a
// per-IDC disk breakdown in DisksStatInfos.
// NOTE(review): not referenced elsewhere in this chunk — presumably a
// stat-endpoint response; confirm against callers.
type SpaceStatInfo struct {
	TotalSpace     int64          `json:"total_space"`
	FreeSpace      int64          `json:"free_space"`
	UsedSpace      int64          `json:"used_space"`
	WritableSpace  int64          `json:"writable_space"`
	TotalBlobNode  int64          `json:"total_blob_node"`
	TotalDisk      int64          `json:"total_disk"`
	DisksStatInfos []DiskStatInfo `json:"disk_stat_infos"`
}
94
// DiskAccessArgs is the request body of /disk/access: toggle a disk's
// read-only flag.
type DiskAccessArgs struct {
	DiskID   proto.DiskID `json:"disk_id"`
	Readonly bool         `json:"readonly"`
}
99
100// DiskIDAlloc alloc diskID from cluster manager
101func (c *Client) AllocDiskID(ctx context.Context) (proto.DiskID, error) {
102ret := &DiskIDAllocRet{}
103err := c.PostWith(ctx, "/diskid/alloc", ret, rpc.NoneBody)
104if err != nil {
105return 0, err
106}
107return ret.DiskID, nil
108}
109
110// DiskInfo get disk info from cluster manager
111func (c *Client) DiskInfo(ctx context.Context, id proto.DiskID) (ret *blobnode.DiskInfo, err error) {
112ret = &blobnode.DiskInfo{}
113err = c.GetWith(ctx, "/disk/info?disk_id="+id.ToString(), ret)
114return
115}
116
117// AddDisk add/register a new disk into cluster manager
118func (c *Client) AddDisk(ctx context.Context, info *blobnode.DiskInfo) (err error) {
119err = c.PostWith(ctx, "/disk/add", nil, info)
120return
121}
122
123// SetDisk set disk status
124func (c *Client) SetDisk(ctx context.Context, id proto.DiskID, status proto.DiskStatus) (err error) {
125if !status.IsValid() {
126return errors.New("invalid status")
127}
128return c.PostWith(ctx, "/disk/set", nil, &DiskSetArgs{DiskID: id, Status: status})
129}
130
131// ListHostDisk list specified host disk info from cluster manager
132func (c *Client) ListHostDisk(ctx context.Context, host string) (ret []*blobnode.DiskInfo, err error) {
133listRet := ListDiskRet{}
134opt := &ListOptionArgs{Host: host, Count: 200}
135for {
136listRet, err = c.ListDisk(ctx, opt)
137if err != nil || len(listRet.Disks) == 0 {
138return
139}
140opt.Marker = listRet.Marker
141ret = append(ret, listRet.Disks...)
142}
143}
144
145// ListDisk list disk info from cluster manager
146// when ListOptionArgs is default value, defalut return 10 diskInfos
147func (c *Client) ListDisk(ctx context.Context, options *ListOptionArgs) (ret ListDiskRet, err error) {
148err = c.GetWith(ctx, fmt.Sprintf(
149"/disk/list?idc=%s&rack=%s&host=%s&status=%d&marker=%d&count=%d",
150options.Idc,
151options.Rack,
152options.Host,
153options.Status,
154options.Marker,
155options.Count,
156), &ret)
157return
158}
159
160// HeartbeatDisk report blobnode disk latest capacity info to cluster manager
161func (c *Client) HeartbeatDisk(ctx context.Context, infos []*blobnode.DiskHeartBeatInfo) (ret []*DiskHeartbeatRet, err error) {
162result := &DisksHeartbeatRet{}
163args := &DisksHeartbeatArgs{Disks: infos}
164err = c.PostWith(ctx, "/disk/heartbeat", result, args)
165ret = result.Disks
166return
167}
168
169func (c *Client) DropDisk(ctx context.Context, id proto.DiskID) (err error) {
170err = c.PostWith(ctx, "/disk/drop", nil, &DiskInfoArgs{DiskID: id})
171return
172}
173
174func (c *Client) DroppedDisk(ctx context.Context, id proto.DiskID) (err error) {
175err = c.PostWith(ctx, "/disk/dropped", nil, &DiskInfoArgs{DiskID: id})
176return
177}
178
179func (c *Client) ListDroppingDisk(ctx context.Context) (ret []*blobnode.DiskInfo, err error) {
180result := &ListDiskRet{}
181err = c.GetWith(ctx, "/disk/droppinglist", result)
182ret = result.Disks
183return
184}
185
186func (c *Client) SetReadonlyDisk(ctx context.Context, id proto.DiskID, readonly bool) (err error) {
187err = c.PostWith(ctx, "/disk/access", nil, &DiskAccessArgs{DiskID: id, Readonly: readonly})
188return
189}
190