// Copyright 2022 The CubeFS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package blobnode

import (
	"context"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"time"

	bloberr "github.com/cubefs/cubefs/blobstore/common/errors"
	"github.com/cubefs/cubefs/blobstore/common/proto"
	"github.com/cubefs/cubefs/blobstore/common/rpc"
)

const (
	ChunkStatusDefault  ChunkStatus = iota // 0
	ChunkStatusNormal                      // 1
	ChunkStatusReadOnly                    // 2
	ChunkStatusRelease                     // 3
	ChunkNumStatus                         // 4
)

const (
	ReleaseForUser    = "release for user"
	ReleaseForCompact = "release for compact"
)

// A chunk ID packs the volume unit id (vuid) and a creation timestamp:
// 8 bytes of vuid followed by 8 bytes of unix-nano time, both big-endian.
const (
	chunkVuidLen      = 8
	chunkTimestampLen = 8
	ChunkIdLength     = chunkVuidLen + chunkTimestampLen
)

var InvalidChunkId ChunkId = [ChunkIdLength]byte{}

var (
	_vuidHexLen      = hex.EncodedLen(chunkVuidLen)
	_timestampHexLen = hex.EncodedLen(chunkTimestampLen)

	// ${vuid_hex}-${timestamp_hex}
	// |-- 16 bytes --|-- 1 byte --|-- 16 bytes --|
	delimiter        = []byte("-")
	ChunkIdEncodeLen = _vuidHexLen + _timestampHexLen + len(delimiter)
)
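
// Illustrative layout sketch (hypothetical values): a ChunkId for vuid
// 0x0000000000000001 created at unix-nano time 0x16ffffffffffffff encodes as
//
//	"0000000000000001-16ffffffffffffff"
//
// hex.EncodedLen(8) == 16 characters per field plus one '-' delimiter,
// so ChunkIdEncodeLen is 33 bytes in total.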

type (
	ChunkId     [ChunkIdLength]byte
	ChunkStatus uint8
)

func (c ChunkId) UnixTime() uint64 {
	return binary.BigEndian.Uint64(c[chunkVuidLen:ChunkIdLength])
}

func (c ChunkId) VolumeUnitId() proto.Vuid {
	return proto.Vuid(binary.BigEndian.Uint64(c[:chunkVuidLen]))
}

func (c *ChunkId) Marshal() ([]byte, error) {
	buf := make([]byte, ChunkIdEncodeLen)
	var i int

	hex.Encode(buf[i:_vuidHexLen], c[:chunkVuidLen])
	i += _vuidHexLen

	copy(buf[i:i+len(delimiter)], delimiter)
	i += len(delimiter)

	hex.Encode(buf[i:], c[chunkVuidLen:ChunkIdLength])

	return buf, nil
}

func (c *ChunkId) Unmarshal(data []byte) error {
	if len(data) != ChunkIdEncodeLen {
		// Report the size mismatch through the error result instead of
		// panicking; callers such as UnmarshalJSON already check it.
		return errors.New("chunk buf size not match")
	}

	var i int

	_, err := hex.Decode(c[:chunkVuidLen], data[i:_vuidHexLen])
	if err != nil {
		return err
	}

	i += _vuidHexLen
	i += len(delimiter)

	_, err = hex.Decode(c[chunkVuidLen:], data[i:])
	if err != nil {
		return err
	}

	return nil
}

func (c ChunkId) String() string {
	buf, _ := c.Marshal()
	return string(buf)
}

func (c ChunkId) MarshalJSON() ([]byte, error) {
	b := make([]byte, ChunkIdEncodeLen+2)
	b[0], b[ChunkIdEncodeLen+1] = '"', '"'

	buf, _ := c.Marshal()
	copy(b[1:], buf)

	return b, nil
}

func (c *ChunkId) UnmarshalJSON(data []byte) (err error) {
	if len(data) != ChunkIdEncodeLen+2 {
		return errors.New("failed unmarshal json")
	}

	return c.Unmarshal(data[1 : ChunkIdEncodeLen+1])
}
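
// exampleChunkIdJSON is an illustrative sketch, not part of the upstream API:
// it shows that MarshalJSON wraps the encoded id in double quotes and that
// UnmarshalJSON strips them off again, round-tripping the value.
func exampleChunkIdJSON(id ChunkId) (ChunkId, error) {
	raw, err := id.MarshalJSON() // e.g. `"<16 hex chars>-<16 hex chars>"`
	if err != nil {
		return InvalidChunkId, err
	}

	var back ChunkId
	err = back.UnmarshalJSON(raw)
	return back, err // back == id when err == nil
}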

func EncodeChunk(id ChunkId) string {
	return id.String()
}

func NewChunkId(vuid proto.Vuid) (chunkId ChunkId) {
	binary.BigEndian.PutUint64(chunkId[:chunkVuidLen], uint64(vuid))
	binary.BigEndian.PutUint64(chunkId[chunkVuidLen:ChunkIdLength], uint64(time.Now().UnixNano()))
	return
}
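
// exampleChunkIdRoundTrip is an illustrative sketch, not part of the upstream
// API: a fresh ChunkId is encoded to its string name with EncodeChunk and
// parsed back with DecodeChunk. The vuid value is hypothetical.
func exampleChunkIdRoundTrip() error {
	id := NewChunkId(proto.Vuid(42)) // hypothetical vuid

	name := EncodeChunk(id) // "${vuid_hex}-${timestamp_hex}", 33 bytes
	back, err := DecodeChunk(name)
	if err != nil {
		return err
	}

	if back != id {
		return errors.New("round trip mismatch")
	}
	return nil
}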

func IsValidDiskID(id proto.DiskID) bool {
	return id != proto.InvalidDiskID
}

func IsValidChunkId(id ChunkId) bool {
	return id != InvalidChunkId
}

func IsValidChunkStatus(status ChunkStatus) bool {
	return status < ChunkNumStatus
}

func DecodeChunk(name string) (id ChunkId, err error) {
	buf := []byte(name)
	if len(buf) != ChunkIdEncodeLen {
		return InvalidChunkId, errors.New("invalid chunk name")
	}

	if err = id.Unmarshal(buf); err != nil {
		return InvalidChunkId, errors.New("chunk unmarshal failed")
	}

	return
}

type CreateChunkArgs struct {
	DiskID    proto.DiskID `json:"diskid"`
	Vuid      proto.Vuid   `json:"vuid"`
	ChunkSize int64        `json:"chunksize,omitempty"`
}

func (c *client) CreateChunk(ctx context.Context, host string, args *CreateChunkArgs) (err error) {
	if !IsValidDiskID(args.DiskID) {
		err = bloberr.ErrInvalidDiskId
		return
	}

	urlStr := fmt.Sprintf("%v/chunk/create/diskid/%v/vuid/%v?chunksize=%v",
		host, args.DiskID, args.Vuid, args.ChunkSize)

	err = c.PostWith(ctx, urlStr, nil, rpc.NoneBody)
	return
}
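
// exampleCreateChunk is an illustrative sketch, not part of the upstream API:
// it shows the expected call shape. The host URL, disk id, and chunk size are
// hypothetical, and cli is assumed to be a *client constructed elsewhere in
// this package.
func exampleCreateChunk(ctx context.Context, cli *client, vuid proto.Vuid) error {
	args := &CreateChunkArgs{
		DiskID:    proto.DiskID(1), // hypothetical disk id
		Vuid:      vuid,
		ChunkSize: 1 << 30, // hypothetical 1 GiB chunk
	}
	return cli.CreateChunk(ctx, "http://127.0.0.1:8889", args)
}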

type StatChunkArgs struct {
	DiskID proto.DiskID `json:"diskid"`
	Vuid   proto.Vuid   `json:"vuid"`
}

func (c *client) StatChunk(ctx context.Context, host string, args *StatChunkArgs) (ci *ChunkInfo, err error) {
	if !IsValidDiskID(args.DiskID) {
		err = bloberr.ErrInvalidDiskId
		return
	}

	urlStr := fmt.Sprintf("%v/chunk/stat/diskid/%v/vuid/%v", host, args.DiskID, args.Vuid)
	ci = new(ChunkInfo)
	err = c.GetWith(ctx, urlStr, ci)
	return
}

type ChangeChunkStatusArgs struct {
	DiskID proto.DiskID `json:"diskid"`
	Vuid   proto.Vuid   `json:"vuid"`
	Force  bool         `json:"force,omitempty"`
}

type ChunkInspectArgs struct {
	DiskID proto.DiskID `json:"diskid"`
	Vuid   proto.Vuid   `json:"vuid"`
}

func (c *client) ReleaseChunk(ctx context.Context, host string, args *ChangeChunkStatusArgs) (err error) {
	if !IsValidDiskID(args.DiskID) {
		err = bloberr.ErrInvalidDiskId
		return
	}

	urlStr := fmt.Sprintf("%v/chunk/release/diskid/%v/vuid/%v?force=%v", host, args.DiskID, args.Vuid, args.Force)
	err = c.PostWith(ctx, urlStr, nil, rpc.NoneBody)
	return
}

func (c *client) SetChunkReadonly(ctx context.Context, host string, args *ChangeChunkStatusArgs) (err error) {
	if !IsValidDiskID(args.DiskID) {
		err = bloberr.ErrInvalidDiskId
		return
	}

	urlStr := fmt.Sprintf("%v/chunk/readonly/diskid/%v/vuid/%v", host, args.DiskID, args.Vuid)

	err = c.PostWith(ctx, urlStr, nil, rpc.NoneBody)
	return
}

func (c *client) SetChunkReadwrite(ctx context.Context, host string, args *ChangeChunkStatusArgs) (err error) {
	if !IsValidDiskID(args.DiskID) {
		err = bloberr.ErrInvalidDiskId
		return
	}

	urlStr := fmt.Sprintf("%v/chunk/readwrite/diskid/%v/vuid/%v", host, args.DiskID, args.Vuid)
	err = c.PostWith(ctx, urlStr, nil, rpc.NoneBody)
	return
}

type ListChunkArgs struct {
	DiskID proto.DiskID `json:"diskid"`
}

type ListChunkRet struct {
	ChunkInfos []*ChunkInfo `json:"chunk_infos"`
}

func (c *client) ListChunks(ctx context.Context, host string, args *ListChunkArgs) (ret []*ChunkInfo, err error) {
	if !IsValidDiskID(args.DiskID) {
		err = bloberr.ErrInvalidDiskId
		return
	}

	urlStr := fmt.Sprintf("%v/chunk/list/diskid/%v", host, args.DiskID)

	listRet := &ListChunkRet{}
	err = c.GetWith(ctx, urlStr, listRet)
	if err != nil {
		return nil, err
	}

	return listRet.ChunkInfos, nil
}
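
// exampleListChunks is an illustrative sketch, not part of the upstream API:
// it lists all chunks on one disk and reports how many were returned. The
// host URL and disk id are hypothetical, and cli is assumed to be a *client
// constructed elsewhere in this package.
func exampleListChunks(ctx context.Context, cli *client) (int, error) {
	infos, err := cli.ListChunks(ctx, "http://127.0.0.1:8889", &ListChunkArgs{
		DiskID: proto.DiskID(1), // hypothetical disk id
	})
	if err != nil {
		return 0, err
	}
	return len(infos), nil
}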

type CompactChunkArgs struct {
	DiskID proto.DiskID `json:"diskid"`
	Vuid   proto.Vuid   `json:"vuid"`
}

type DiskProbeArgs struct {
	Path string `json:"path"`
}

type BadShard struct {
	DiskID proto.DiskID
	Vuid   proto.Vuid
	Bid    proto.BlobID
}