/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: build.c,v 1.75 2005/07/22 10:32:07 dedekind Exp $
 *
 */

#include <dirent.h>
#include <stddef.h>
#include <errno.h>
#include <stdint.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);

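/* Iteration helpers: the inocache table is an array of INOCACHE_HASHSIZE
 * singly-linked chains. first_inode_chain() finds the next non-empty bucket
 * starting at *i, and next_inode() follows a chain, falling through to the
 * next bucket when the chain ends. for_each_inode() wraps them into a loop
 * over every inode cache entry.
 */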
static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c) {
	for (; *i < INOCACHE_HASHSIZE; (*i)++) {
		if (c->inocache_list[*i])
			return c->inocache_list[*i];
	}
	return NULL;
}

static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) {
	/* More in this chain? */
	if (ic->next)
		return ic->next;
	(*i)++;
	return first_inode_chain(i, c);
}

#define for_each_inode(i, c, ic)			\
	for (i = 0, ic = first_inode_chain(&i, (c));	\
	     ic;					\
	     ic = next_inode(&i, ic, (c)))

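/* Pass 1 helper: walk the dirents collected for a directory during the medium
 * scan and bump the nlink of every child they reference. Dirents with
 * ino == 0 are deletion markers and are skipped; dirents pointing at missing
 * inodes are marked obsolete.
 */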
static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
		struct jffs2_inode_cache *ic) {
	struct jffs2_full_dirent *fd;

	D1(printk("jffs2_build_inode building directory inode #%u\n", ic->ino));

	/* For each child, increase nlink */
	for (fd = ic->scan_dents; fd; fd = fd->next) {
		struct jffs2_inode_cache *child_ic;
		if (!fd->ino) {
			continue;
		}
		/* XXX: Can get high latency here with huge directories */

		child_ic = jffs2_get_ino_cache(c, fd->ino);
		if (!child_ic) {
			printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
				  fd->name, fd->ino, ic->ino);
			jffs2_mark_node_obsolete(c, fd->raw);
			continue;
		}

		if (child_ic->nlink++ && fd->type == DT_DIR) {
			printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
			if (fd->ino == 1 && ic->ino == 1) {
				printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
				printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
			}
			/* What do we do about it? */
		}
		D1(printk("Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
		/* Can't free them. We might need them in pass 2 */
	}
}

/* Scan plan:
 * - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 * - Scan directory tree from top down, setting nlink in inocaches
 * - Scan inocaches for inodes with nlink==0
 */
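/* Removing an unlinked directory in pass 2 may drop its children's nlink to
 * zero as well. Those children are queued on a dead_fds list and handled
 * iteratively in pass 2a instead of recursing.
 */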
static int jffs2_build_filesystem(struct jffs2_sb_info *c) {
	int ret;
	int i;
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	struct jffs2_full_dirent *dead_fds = NULL;

	/* First, scan the medium and build all the inode caches with
	 * lists of physical nodes
	 */

	c->flags |= JFFS2_SB_FLAG_SCANNING;
	ret = jffs2_scan_medium(c);
	c->flags &= ~JFFS2_SB_FLAG_SCANNING;
	if (ret) {
		goto exit;
	}

	D1(printk("Scanned flash completely\n"));
	jffs2_dbg_dump_block_lists_nolock(c);

	c->flags |= JFFS2_SB_FLAG_BUILDING;
	/* Now scan the directory tree, increasing nlink according to every dirent found. */
	for_each_inode(i, c, ic) {
		D1(printk("Pass 1: ino #%u\n", ic->ino));

		D1(BUG_ON(ic->ino > c->highest_ino));

		if (ic->scan_dents) {
			jffs2_build_inode_pass1(c, ic);
			cond_resched();
		}
	}

	D1(printk("Pass 1 complete\n"));

	/* Next, scan for inodes with nlink == 0 and remove them. If
	 * they were directories, then decrement the nlink of their
	 * children too, and repeat the scan. As that's going to be
	 * a fairly uncommon occurrence, it's not so evil to do it this
	 * way. Recursion bad.
	 */
	D1(printk("Pass 2 starting\n"));

	for_each_inode(i, c, ic) {
		D1(printk("Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
		if (ic->nlink) {
			continue;
		}

		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		cond_resched();
	}

	D1(printk("Pass 2a starting\n"));

	while (dead_fds) {
		fd = dead_fds;
		dead_fds = fd->next;

		ic = jffs2_get_ino_cache(c, fd->ino);
		D1(printk("Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));

		if (ic) {
			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		}
		jffs2_free_full_dirent(fd);
	}

	D1(printk("Pass 2 complete\n"));

	/* Finally, we can scan again and free the dirent structs */
	for_each_inode(i, c, ic) {
		D1(printk("Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));

		while (ic->scan_dents) {
			fd = ic->scan_dents;
			ic->scan_dents = fd->next;
			jffs2_free_full_dirent(fd);
		}
		ic->scan_dents = NULL;
		cond_resched();
	}
	c->flags &= ~JFFS2_SB_FLAG_BUILDING;

	D1(printk("Pass 3 complete\n"));
	jffs2_dbg_dump_block_lists_nolock(c);

	/* Rotate the lists by some number to ensure wear levelling */
	jffs2_rotate_lists(c);

	ret = 0;

exit:
	if (ret) {
		for_each_inode(i, c, ic) {
			while (ic->scan_dents) {
				fd = ic->scan_dents;
				ic->scan_dents = fd->next;
				jffs2_free_full_dirent(fd);
			}
		}
	}

	return ret;
}

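/* Obsolete every node belonging to an inode whose nlink reached zero. If the
 * inode was a directory, its remaining dirents are processed too: each
 * child's nlink is decremented, and children that drop to zero are pushed
 * onto the caller's dead_fds list for later removal.
 */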
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
		struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) {
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;

	D1(printk("JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));

	raw = ic->nodes;
	while (raw != (void *)ic) {
		struct jffs2_raw_node_ref *next = raw->next_in_ino;
		D1(printk("obsoleting node at 0x%08x\n", ref_offset(raw)));
		jffs2_mark_node_obsolete(c, raw);
		raw = next;
	}

	if (ic->scan_dents) {
		int whinged = 0;
		D1(printk("Inode #%u was a directory which may have children...\n", ic->ino));

		while (ic->scan_dents) {
			struct jffs2_inode_cache *child_ic;

			fd = ic->scan_dents;
			ic->scan_dents = fd->next;

			if (!fd->ino) {
				/* It's a deletion dirent. Ignore it */
				D1(printk("Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
				jffs2_free_full_dirent(fd);
				continue;
			}
			if (!whinged) {
				whinged = 1;
				printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
			}

			D1(printk("Removing child \"%s\", ino #%u\n",
				  fd->name, fd->ino));

			child_ic = jffs2_get_ino_cache(c, fd->ino);
			if (!child_ic) {
				printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
				jffs2_free_full_dirent(fd);
				continue;
			}

			/* Reduce nlink of the child. If it's now zero, stick it on the
			 * dead_fds list to be cleaned up later. Else just free the fd
			 */

			child_ic->nlink--;

			if (!child_ic->nlink) {
				D1(printk("Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
					  fd->ino, fd->name));
				fd->next = *dead_fds;
				*dead_fds = fd;
			} else {
				D1(printk("Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
					  fd->ino, fd->name, child_ic->nlink));
				jffs2_free_full_dirent(fd);
			}
		}
	}

	/* We don't delete the inocache from the hash list and free it yet.
	 * The erase code will do that, when all the nodes are completely gone.
	 */
}

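/* Compute the reserved-block thresholds used by the allocator and the garbage
 * collector. Deletion gets the smallest reserve, writes add headroom for node
 * overhead, and the GC thread triggers one block before writes would start
 * being refused.
 */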
static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) {
	uint32_t size;

	/* Deletion should almost _always_ be allowed. We're fairly
	 * buggered once we stop allowing people to delete stuff
	 * because there's not enough free space...
	 */
	c->resv_blocks_deletion = 2;

	/* Be conservative about how much space we need before we allow writes.
	 * On top of that which is required for deletia, require an extra 2%
	 * of the medium to be available, for overhead caused by nodes being
	 * split across blocks, etc.
	 */

	size = c->flash_size / 50; /* 2% of flash size */
	size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
	size += c->sector_size - 1; /* ... and round up */

	c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
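	/* Illustrative numbers (not taken from any particular board): for a
	 * 16 MiB flash with 64 KiB eraseblocks (256 blocks), size comes to
	 * 335544 + 25600 + 65535 = 426679 bytes, i.e. 6 whole eraseblocks,
	 * so resv_blocks_write ends up at 2 + 6 = 8 blocks.
	 */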

	/* When do we let the GC thread run in the background */

	c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

	/* When do we allow garbage collection to merge nodes to make
	   long-term progress at the expense of short-term space exhaustion? */
	c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

	/* When do we allow garbage collection to eat from bad blocks rather
	   than actually making progress? */
	c->resv_blocks_gcbad = 0;

	/* If there's less than this amount of dirty space, don't bother
	   trying to GC to make more space. It'll be a fruitless task */
	c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);

	D1(printk("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
		  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
	D1(printk("Blocks required to allow deletion:    %d (%d KiB)\n",
		  c->resv_blocks_deletion, c->resv_blocks_deletion * c->sector_size / 1024));
	D1(printk("Blocks required to allow writes:      %d (%d KiB)\n",
		  c->resv_blocks_write, c->resv_blocks_write * c->sector_size / 1024));
	D1(printk("Blocks required to quiesce GC thread: %d (%d KiB)\n",
		  c->resv_blocks_gctrigger, c->resv_blocks_gctrigger * c->sector_size / 1024));
	D1(printk("Blocks required to allow GC merges:   %d (%d KiB)\n",
		  c->resv_blocks_gcmerge, c->resv_blocks_gcmerge * c->sector_size / 1024));
	D1(printk("Blocks required to GC bad blocks:     %d (%d KiB)\n",
		  c->resv_blocks_gcbad, c->resv_blocks_gcbad * c->sector_size / 1024));
	D1(printk("Amount of dirty space required to GC: %d bytes\n",
		  c->nospc_dirty_size));
}

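/* Mount-time entry point: allocate and initialise the per-eraseblock
 * bookkeeping and the block lists, build the in-core filesystem by scanning
 * the medium, then derive the reservation thresholds above.
 */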
int jffs2_do_mount_fs(struct jffs2_sb_info *c) {
	int i;

	c->free_size = c->flash_size;
	c->nr_blocks = c->flash_size / c->sector_size;

	c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
	if (!c->blocks) {
		return -ENOMEM;
	}
	for (i = 0; i < c->nr_blocks; i++) {
		INIT_LIST_HEAD(&c->blocks[i].list);
		c->blocks[i].offset = i * c->sector_size;
		c->blocks[i].free_size = c->sector_size;
		c->blocks[i].dirty_size = 0;
		c->blocks[i].wasted_size = 0;
		c->blocks[i].unchecked_size = 0;
		c->blocks[i].used_size = 0;
		c->blocks[i].first_node = NULL;
		c->blocks[i].last_node = NULL;
		c->blocks[i].bad_count = 0;
	}

	INIT_LIST_HEAD(&c->clean_list);
	INIT_LIST_HEAD(&c->very_dirty_list);
	INIT_LIST_HEAD(&c->dirty_list);
	INIT_LIST_HEAD(&c->erasable_list);
	INIT_LIST_HEAD(&c->erasing_list);
	INIT_LIST_HEAD(&c->erase_pending_list);
	INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
	INIT_LIST_HEAD(&c->erase_complete_list);
	INIT_LIST_HEAD(&c->free_list);
	INIT_LIST_HEAD(&c->bad_list);
	INIT_LIST_HEAD(&c->bad_used_list);
	c->highest_ino = 1;

	if (jffs2_build_filesystem(c)) {
		D1(printk("build_fs failed\n"));
		jffs2_free_ino_caches(c);
		jffs2_free_raw_node_refs(c);
		kfree(c->blocks);

		return -EIO;
	}

	jffs2_calc_trigger_levels(c);

	return 0;
}