author    Yang Zhang <yang.z.zhang@intel.com>  2015-08-28 09:58:54 +0800
committer Yang Zhang <yang.z.zhang@intel.com>  2015-09-01 12:44:00 +0800
commit    e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb (patch)
tree      66b09f592c55df2878107a468a91d21506104d3f /qemu/roms/u-boot/fs
parent    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (diff)
Add qemu 2.4.0
Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
Diffstat (limited to 'qemu/roms/u-boot/fs')
-rw-r--r--  qemu/roms/u-boot/fs/Makefile  24
-rw-r--r--  qemu/roms/u-boot/fs/cbfs/Makefile  6
-rw-r--r--  qemu/roms/u-boot/fs/cbfs/cbfs.c  323
-rw-r--r--  qemu/roms/u-boot/fs/cramfs/Makefile  9
-rw-r--r--  qemu/roms/u-boot/fs/cramfs/cramfs.c  348
-rw-r--r--  qemu/roms/u-boot/fs/cramfs/uncompress.c  83
-rw-r--r--  qemu/roms/u-boot/fs/ext4/Makefile  13
-rw-r--r--  qemu/roms/u-boot/fs/ext4/crc16.c  62
-rw-r--r--  qemu/roms/u-boot/fs/ext4/crc16.h  16
-rw-r--r--  qemu/roms/u-boot/fs/ext4/dev.c  146
-rw-r--r--  qemu/roms/u-boot/fs/ext4/ext4_common.c  2250
-rw-r--r--  qemu/roms/u-boot/fs/ext4/ext4_common.h  78
-rw-r--r--  qemu/roms/u-boot/fs/ext4/ext4_journal.c  653
-rw-r--r--  qemu/roms/u-boot/fs/ext4/ext4_journal.h  125
-rw-r--r--  qemu/roms/u-boot/fs/ext4/ext4_write.c  977
-rw-r--r--  qemu/roms/u-boot/fs/ext4/ext4fs.c  228
-rw-r--r--  qemu/roms/u-boot/fs/fat/Makefile  11
-rw-r--r--  qemu/roms/u-boot/fs/fat/fat.c  1273
-rw-r--r--  qemu/roms/u-boot/fs/fat/fat_write.c  1106
-rw-r--r--  qemu/roms/u-boot/fs/fat/file.c  184
-rw-r--r--  qemu/roms/u-boot/fs/fs.c  399
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/LICENCE  30
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/Makefile  13
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/compr_lzo.c  401
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/compr_rtime.c  87
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/compr_rubin.c  122
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/compr_zlib.c  48
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/jffs2_1pass.c  1865
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/jffs2_nand_1pass.c  1030
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/jffs2_nand_private.h  133
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/jffs2_private.h  101
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/mini_inflate.c  377
-rw-r--r--  qemu/roms/u-boot/fs/jffs2/summary.h  163
-rw-r--r--  qemu/roms/u-boot/fs/reiserfs/Makefile  12
-rw-r--r--  qemu/roms/u-boot/fs/reiserfs/dev.c  98
-rw-r--r--  qemu/roms/u-boot/fs/reiserfs/mode_string.c  125
-rw-r--r--  qemu/roms/u-boot/fs/reiserfs/reiserfs.c  972
-rw-r--r--  qemu/roms/u-boot/fs/reiserfs/reiserfs_private.h  508
-rw-r--r--  qemu/roms/u-boot/fs/sandbox/Makefile  13
-rw-r--r--  qemu/roms/u-boot/fs/sandbox/sandboxfs.c  111
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/Makefile  15
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/budget.c  113
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/crc16.c  60
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/crc16.h  29
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/debug.c  156
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/debug.h  392
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/io.c  316
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/key.h  557
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/log.c  104
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/lprops.c  842
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/lpt.c  1105
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/lpt_commit.c  171
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/master.c  341
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/misc.h  311
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/orphan.c  316
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/recovery.c  1225
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/replay.c  1070
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/sb.c  346
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/scan.c  362
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/super.c  1199
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/tnc.c  2767
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/tnc_misc.c  435
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/ubifs-media.h  775
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/ubifs.c  751
-rw-r--r--  qemu/roms/u-boot/fs/ubifs/ubifs.h  2154
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/Makefile  29
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.c  356
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.h  30
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.c  152
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.h  28
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.c  97
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.h  33
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.c  408
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.h  33
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.c  281
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.h  44
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_error.c  58
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_flashif.h  35
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_flashif2.h  35
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_getblockinfo.h  35
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_guts.c  5021
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_guts.h  973
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.c  165
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.h  27
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.c  232
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.h  30
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.c  208
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.h  28
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nand.c  120
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nand.h  38
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nandemul2k.h  39
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.c  251
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.h  65
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_osglue.h  41
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.c  56
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.h  39
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.c  197
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.h  47
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_qsort.c  141
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_summary.c  309
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_summary.h  37
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.c  407
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.h  36
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_trace.h  57
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_uboot_glue.c  465
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_verify.c  526
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_verify.h  43
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.c  419
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.h  22
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.c  1526
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.h  39
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffscfg.h  38
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffsfs.c  3217
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yaffsfs.h  209
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/ydirectenv.h  84
-rw-r--r--  qemu/roms/u-boot/fs/yaffs2/yportenv.h  309
-rw-r--r--  qemu/roms/u-boot/fs/zfs/Makefile  8
-rw-r--r--  qemu/roms/u-boot/fs/zfs/dev.c  112
-rw-r--r--  qemu/roms/u-boot/fs/zfs/zfs.c  2334
-rw-r--r--  qemu/roms/u-boot/fs/zfs/zfs_fletcher.c  75
-rw-r--r--  qemu/roms/u-boot/fs/zfs/zfs_lzjb.c  85
-rw-r--r--  qemu/roms/u-boot/fs/zfs/zfs_sha256.c  136
122 files changed, 50300 insertions, 0 deletions
diff --git a/qemu/roms/u-boot/fs/Makefile b/qemu/roms/u-boot/fs/Makefile
new file mode 100644
index 000000000..18221658f
--- /dev/null
+++ b/qemu/roms/u-boot/fs/Makefile
@@ -0,0 +1,24 @@
+#
+# (C) Copyright 2000-2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+# Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+ifdef CONFIG_SPL_BUILD
+obj-$(CONFIG_SPL_FAT_SUPPORT) += fat/
+else
+obj-y += fs.o
+
+obj-$(CONFIG_CMD_CBFS) += cbfs/
+obj-$(CONFIG_CMD_CRAMFS) += cramfs/
+obj-$(CONFIG_FS_EXT4) += ext4/
+obj-y += fat/
+obj-$(CONFIG_CMD_JFFS2) += jffs2/
+obj-$(CONFIG_CMD_REISER) += reiserfs/
+obj-$(CONFIG_SANDBOX) += sandbox/
+obj-$(CONFIG_CMD_UBIFS) += ubifs/
+obj-$(CONFIG_YAFFS2) += yaffs2/
+obj-$(CONFIG_CMD_ZFS) += zfs/
+endif
diff --git a/qemu/roms/u-boot/fs/cbfs/Makefile b/qemu/roms/u-boot/fs/cbfs/Makefile
new file mode 100644
index 000000000..a106e05dd
--- /dev/null
+++ b/qemu/roms/u-boot/fs/cbfs/Makefile
@@ -0,0 +1,6 @@
+# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := cbfs.o
diff --git a/qemu/roms/u-boot/fs/cbfs/cbfs.c b/qemu/roms/u-boot/fs/cbfs/cbfs.c
new file mode 100644
index 000000000..c81b61106
--- /dev/null
+++ b/qemu/roms/u-boot/fs/cbfs/cbfs.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <cbfs.h>
+#include <malloc.h>
+#include <asm/byteorder.h>
+
+enum cbfs_result file_cbfs_result;
+
+const char *file_cbfs_error(void)
+{
+ switch (file_cbfs_result) {
+ case CBFS_SUCCESS:
+ return "Success";
+ case CBFS_NOT_INITIALIZED:
+ return "CBFS not initialized";
+ case CBFS_BAD_HEADER:
+ return "Bad CBFS header";
+ case CBFS_BAD_FILE:
+ return "Bad CBFS file";
+ case CBFS_FILE_NOT_FOUND:
+ return "File not found";
+ default:
+ return "Unknown";
+ }
+}
+
+
+static const u32 good_magic = 0x4f524243;
+static const u8 good_file_magic[] = "LARCHIVE";
+
+
+static int initialized;
+static struct cbfs_header cbfs_header;
+static struct cbfs_cachenode *file_cache;
+
+/* Do endian conversion on the CBFS header structure. */
+static void swap_header(struct cbfs_header *dest, struct cbfs_header *src)
+{
+ dest->magic = be32_to_cpu(src->magic);
+ dest->version = be32_to_cpu(src->version);
+ dest->rom_size = be32_to_cpu(src->rom_size);
+ dest->boot_block_size = be32_to_cpu(src->boot_block_size);
+ dest->align = be32_to_cpu(src->align);
+ dest->offset = be32_to_cpu(src->offset);
+}
+
+/* Do endian conversion on a CBFS file header. */
+static void swap_file_header(struct cbfs_fileheader *dest,
+ const struct cbfs_fileheader *src)
+{
+ memcpy(&dest->magic, &src->magic, sizeof(dest->magic));
+ dest->len = be32_to_cpu(src->len);
+ dest->type = be32_to_cpu(src->type);
+ dest->checksum = be32_to_cpu(src->checksum);
+ dest->offset = be32_to_cpu(src->offset);
+}
+
+/*
+ * Given a starting position in memory, scan forward, bounded by a size, and
+ * find the next valid CBFS file. No memory is allocated by this function. The
+ * caller is responsible for allocating space for the new file structure.
+ *
+ * @param start The location in memory to start from.
+ * @param size The size of the memory region to search.
+ * @param align The alignment boundaries to check on.
+ * @param newNode A pointer to the file structure to load.
+ * @param used A pointer to the count of bytes scanned through,
+ * including the file if one is found.
+ *
+ * @return 1 if a file is found, 0 if one isn't.
+ */
+static int file_cbfs_next_file(u8 *start, u32 size, u32 align,
+ struct cbfs_cachenode *newNode, u32 *used)
+{
+ struct cbfs_fileheader header;
+
+ *used = 0;
+
+ while (size >= align) {
+ const struct cbfs_fileheader *fileHeader =
+ (const struct cbfs_fileheader *)start;
+ u32 name_len;
+ u32 step;
+
+ /* Check if there's a file here. */
+ if (memcmp(good_file_magic, &(fileHeader->magic),
+ sizeof(fileHeader->magic))) {
+ *used += align;
+ size -= align;
+ start += align;
+ continue;
+ }
+
+ swap_file_header(&header, fileHeader);
+ if (header.offset < sizeof(const struct cbfs_cachenode *) ||
+ header.offset > header.len) {
+ file_cbfs_result = CBFS_BAD_FILE;
+ return -1;
+ }
+ newNode->next = NULL;
+ newNode->type = header.type;
+ newNode->data = start + header.offset;
+ newNode->data_length = header.len;
+ name_len = header.offset - sizeof(struct cbfs_cachenode *);
+ newNode->name = (char *)fileHeader +
+ sizeof(struct cbfs_cachenode *);
+ newNode->name_length = name_len;
+ newNode->checksum = header.checksum;
+
+ step = header.len;
+ if (step % align)
+ step = step + align - step % align;
+
+ *used += step;
+ return 1;
+ }
+ return 0;
+}
+
+/* Look through a CBFS instance and copy file metadata into regular memory. */
+static void file_cbfs_fill_cache(u8 *start, u32 size, u32 align)
+{
+ struct cbfs_cachenode *cache_node;
+ struct cbfs_cachenode *newNode;
+ struct cbfs_cachenode **cache_tail = &file_cache;
+
+ /* Clear out old information. */
+ cache_node = file_cache;
+ while (cache_node) {
+ struct cbfs_cachenode *oldNode = cache_node;
+ cache_node = cache_node->next;
+ free(oldNode);
+ }
+ file_cache = NULL;
+
+ while (size >= align) {
+ int result;
+ u32 used;
+
+ newNode = (struct cbfs_cachenode *)
+ malloc(sizeof(struct cbfs_cachenode));
+ result = file_cbfs_next_file(start, size, align,
+ newNode, &used);
+
+ if (result < 0) {
+ free(newNode);
+ return;
+ } else if (result == 0) {
+ free(newNode);
+ break;
+ }
+ *cache_tail = newNode;
+ cache_tail = &newNode->next;
+
+ size -= used;
+ start += used;
+ }
+ file_cbfs_result = CBFS_SUCCESS;
+}
+
+/* Get the CBFS header out of the ROM and do endian conversion. */
+static int file_cbfs_load_header(uintptr_t end_of_rom,
+ struct cbfs_header *header)
+{
+ struct cbfs_header *header_in_rom;
+
+ header_in_rom = (struct cbfs_header *)(uintptr_t)
+ *(u32 *)(end_of_rom - 3);
+ swap_header(header, header_in_rom);
+
+ if (header->magic != good_magic || header->offset >
+ header->rom_size - header->boot_block_size) {
+ file_cbfs_result = CBFS_BAD_HEADER;
+ return 1;
+ }
+ return 0;
+}
+
+void file_cbfs_init(uintptr_t end_of_rom)
+{
+ u8 *start_of_rom;
+ initialized = 0;
+
+ if (file_cbfs_load_header(end_of_rom, &cbfs_header))
+ return;
+
+ start_of_rom = (u8 *)(end_of_rom + 1 - cbfs_header.rom_size);
+
+ file_cbfs_fill_cache(start_of_rom + cbfs_header.offset,
+ cbfs_header.rom_size, cbfs_header.align);
+ if (file_cbfs_result == CBFS_SUCCESS)
+ initialized = 1;
+}
+
+const struct cbfs_header *file_cbfs_get_header(void)
+{
+ if (initialized) {
+ file_cbfs_result = CBFS_SUCCESS;
+ return &cbfs_header;
+ } else {
+ file_cbfs_result = CBFS_NOT_INITIALIZED;
+ return NULL;
+ }
+}
+
+const struct cbfs_cachenode *file_cbfs_get_first(void)
+{
+ if (!initialized) {
+ file_cbfs_result = CBFS_NOT_INITIALIZED;
+ return NULL;
+ } else {
+ file_cbfs_result = CBFS_SUCCESS;
+ return file_cache;
+ }
+}
+
+void file_cbfs_get_next(const struct cbfs_cachenode **file)
+{
+ if (!initialized) {
+ file_cbfs_result = CBFS_NOT_INITIALIZED;
+ file = NULL;
+ return;
+ }
+
+ if (*file)
+ *file = (*file)->next;
+ file_cbfs_result = CBFS_SUCCESS;
+}
+
+const struct cbfs_cachenode *file_cbfs_find(const char *name)
+{
+ struct cbfs_cachenode *cache_node = file_cache;
+
+ if (!initialized) {
+ file_cbfs_result = CBFS_NOT_INITIALIZED;
+ return NULL;
+ }
+
+ while (cache_node) {
+ if (!strcmp(name, cache_node->name))
+ break;
+ cache_node = cache_node->next;
+ }
+ if (!cache_node)
+ file_cbfs_result = CBFS_FILE_NOT_FOUND;
+ else
+ file_cbfs_result = CBFS_SUCCESS;
+
+ return cache_node;
+}
+
+const struct cbfs_cachenode *file_cbfs_find_uncached(uintptr_t end_of_rom,
+ const char *name)
+{
+ u8 *start;
+ u32 size;
+ u32 align;
+ static struct cbfs_cachenode node;
+
+ if (file_cbfs_load_header(end_of_rom, &cbfs_header))
+ return NULL;
+
+ start = (u8 *)(end_of_rom + 1 - cbfs_header.rom_size);
+ size = cbfs_header.rom_size;
+ align = cbfs_header.align;
+
+ while (size >= align) {
+ int result;
+ u32 used;
+
+ result = file_cbfs_next_file(start, size, align, &node, &used);
+
+ if (result < 0)
+ return NULL;
+ else if (result == 0)
+ break;
+
+ if (!strcmp(name, node.name))
+ return &node;
+
+ size -= used;
+ start += used;
+ }
+ file_cbfs_result = CBFS_FILE_NOT_FOUND;
+ return NULL;
+}
+
+const char *file_cbfs_name(const struct cbfs_cachenode *file)
+{
+ file_cbfs_result = CBFS_SUCCESS;
+ return file->name;
+}
+
+u32 file_cbfs_size(const struct cbfs_cachenode *file)
+{
+ file_cbfs_result = CBFS_SUCCESS;
+ return file->data_length;
+}
+
+u32 file_cbfs_type(const struct cbfs_cachenode *file)
+{
+ file_cbfs_result = CBFS_SUCCESS;
+ return file->type;
+}
+
+long file_cbfs_read(const struct cbfs_cachenode *file, void *buffer,
+ unsigned long maxsize)
+{
+ u32 size;
+
+ size = file->data_length;
+ if (maxsize && size > maxsize)
+ size = maxsize;
+
+ memcpy(buffer, file->data, size);
+
+ file_cbfs_result = CBFS_SUCCESS;
+ return size;
+}
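Editorial note: here is a minimal sketch of how the reader API added above is typically driven from board or command code; the ROM end address, file name and buffer size below are illustrative assumptions and not part of this commit.

#include <common.h>
#include <cbfs.h>

/* Hypothetical destination buffer; 64 KiB chosen only for illustration. */
static char payload_buf[0x10000];

static int load_cbfs_file(void)
{
	const struct cbfs_cachenode *file;
	long size;

	/* Assume the CBFS sits just below the top of the 32-bit address space. */
	file_cbfs_init(0xffffffff);
	if (!file_cbfs_get_header()) {
		printf("cbfs: %s\n", file_cbfs_error());
		return -1;
	}

	file = file_cbfs_find("fallback/payload");	/* hypothetical file name */
	if (!file) {
		printf("cbfs: %s\n", file_cbfs_error());
		return -1;
	}

	size = file_cbfs_read(file, payload_buf, sizeof(payload_buf));
	printf("read %ld of %u bytes from '%s'\n", size,
	       file_cbfs_size(file), file_cbfs_name(file));
	return 0;
}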
diff --git a/qemu/roms/u-boot/fs/cramfs/Makefile b/qemu/roms/u-boot/fs/cramfs/Makefile
new file mode 100644
index 000000000..12d73a375
--- /dev/null
+++ b/qemu/roms/u-boot/fs/cramfs/Makefile
@@ -0,0 +1,9 @@
+#
+# (C) Copyright 2000-2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := cramfs.o
+obj-y += uncompress.o
diff --git a/qemu/roms/u-boot/fs/cramfs/cramfs.c b/qemu/roms/u-boot/fs/cramfs/cramfs.c
new file mode 100644
index 000000000..fd8e4ef31
--- /dev/null
+++ b/qemu/roms/u-boot/fs/cramfs/cramfs.c
@@ -0,0 +1,348 @@
+/*
+ * cramfs.c
+ *
+ * Copyright (C) 1999 Linus Torvalds
+ *
+ * Copyright (C) 2000-2002 Transmeta Corporation
+ *
+ * Copyright (C) 2003 Kai-Uwe Bloem,
+ * Auerswald GmbH & Co KG, <linux-development@auerswald.de>
+ * - adapted from the www.tuxbox.org u-boot tree, added "ls" command
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * Compressed ROM filesystem for Linux.
+ *
+ * TODO:
+ * add support for resolving symbolic links
+ */
+
+/*
+ * These are the VFS interfaces to the compressed ROM filesystem.
+ * The actual compression is based on zlib, see the other files.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/byteorder.h>
+#include <linux/stat.h>
+#include <jffs2/jffs2.h>
+#include <jffs2/load_kernel.h>
+#include <cramfs/cramfs_fs.h>
+
+/* These two macros may change in future, to provide better st_ino
+ semantics. */
+#define CRAMINO(x) (CRAMFS_GET_OFFSET(x) ? CRAMFS_GET_OFFSET(x)<<2 : 1)
+#define OFFSET(x) ((x)->i_ino)
+
+struct cramfs_super super;
+
+/* CPU address space offset calculation macro, struct part_info offset is
+ * device address space offset, so we need to shift it by a device start address. */
+#if !defined(CONFIG_SYS_NO_FLASH)
+extern flash_info_t flash_info[];
+#define PART_OFFSET(x) ((ulong)x->offset + \
+ flash_info[x->dev->id->num].start[0])
+#else
+#define PART_OFFSET(x) ((ulong)x->offset)
+#endif
+
+static int cramfs_read_super (struct part_info *info)
+{
+ unsigned long root_offset;
+
+ /* Read the first block and get the superblock from it */
+ memcpy (&super, (void *) PART_OFFSET(info), sizeof (super));
+
+ /* Do sanity checks on the superblock */
+ if (super.magic != CRAMFS_32 (CRAMFS_MAGIC)) {
+ /* check at 512 byte offset */
+ memcpy (&super, (void *) PART_OFFSET(info) + 512, sizeof (super));
+ if (super.magic != CRAMFS_32 (CRAMFS_MAGIC)) {
+ printf ("cramfs: wrong magic\n");
+ return -1;
+ }
+ }
+
+ /* flags is reused several times, so swab it once */
+ super.flags = CRAMFS_32 (super.flags);
+ super.size = CRAMFS_32 (super.size);
+
+ /* get feature flags first */
+ if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
+ printf ("cramfs: unsupported filesystem features\n");
+ return -1;
+ }
+
+ /* Check that the root inode is in a sane state */
+ if (!S_ISDIR (CRAMFS_16 (super.root.mode))) {
+ printf ("cramfs: root is not a directory\n");
+ return -1;
+ }
+ root_offset = CRAMFS_GET_OFFSET (&(super.root)) << 2;
+ if (root_offset == 0) {
+ printf ("cramfs: empty filesystem");
+ } else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
+ ((root_offset != sizeof (struct cramfs_super)) &&
+ (root_offset != 512 + sizeof (struct cramfs_super)))) {
+ printf ("cramfs: bad root offset %lu\n", root_offset);
+ return -1;
+ }
+
+ return 0;
+}
+
+static unsigned long cramfs_resolve (unsigned long begin, unsigned long offset,
+ unsigned long size, int raw,
+ char *filename)
+{
+ unsigned long inodeoffset = 0, nextoffset;
+
+ while (inodeoffset < size) {
+ struct cramfs_inode *inode;
+ char *name;
+ int namelen;
+
+ inode = (struct cramfs_inode *) (begin + offset +
+ inodeoffset);
+
+ /*
+ * Namelengths on disk are shifted by two
+ * and the name padded out to 4-byte boundaries
+ * with zeroes.
+ */
+ namelen = CRAMFS_GET_NAMELEN (inode) << 2;
+ name = (char *) inode + sizeof (struct cramfs_inode);
+
+ nextoffset =
+ inodeoffset + sizeof (struct cramfs_inode) + namelen;
+
+ for (;;) {
+ if (!namelen)
+ return -1;
+ if (name[namelen - 1])
+ break;
+ namelen--;
+ }
+
+ if (!strncmp(filename, name, namelen) &&
+ (namelen == strlen(filename))) {
+ char *p = strtok (NULL, "/");
+
+ if (raw && (p == NULL || *p == '\0'))
+ return offset + inodeoffset;
+
+ if (S_ISDIR (CRAMFS_16 (inode->mode))) {
+ return cramfs_resolve (begin,
+ CRAMFS_GET_OFFSET
+ (inode) << 2,
+ CRAMFS_24 (inode->
+ size), raw,
+ p);
+ } else if (S_ISREG (CRAMFS_16 (inode->mode))) {
+ return offset + inodeoffset;
+ } else {
+ printf ("%*.*s: unsupported file type (%x)\n",
+ namelen, namelen, name,
+ CRAMFS_16 (inode->mode));
+ return 0;
+ }
+ }
+
+ inodeoffset = nextoffset;
+ }
+
+ printf ("can't find corresponding entry\n");
+ return 0;
+}
+
+static int cramfs_uncompress (unsigned long begin, unsigned long offset,
+ unsigned long loadoffset)
+{
+ struct cramfs_inode *inode = (struct cramfs_inode *) (begin + offset);
+ unsigned long *block_ptrs = (unsigned long *)
+ (begin + (CRAMFS_GET_OFFSET (inode) << 2));
+ unsigned long curr_block = (CRAMFS_GET_OFFSET (inode) +
+ (((CRAMFS_24 (inode->size)) +
+ 4095) >> 12)) << 2;
+ int size, total_size = 0;
+ int i;
+
+ cramfs_uncompress_init ();
+
+ for (i = 0; i < ((CRAMFS_24 (inode->size) + 4095) >> 12); i++) {
+ size = cramfs_uncompress_block ((void *) loadoffset,
+ (void *) (begin + curr_block),
+ (CRAMFS_32 (block_ptrs[i]) -
+ curr_block));
+ if (size < 0)
+ return size;
+ loadoffset += size;
+ total_size += size;
+ curr_block = CRAMFS_32 (block_ptrs[i]);
+ }
+
+ cramfs_uncompress_exit ();
+ return total_size;
+}
+
+int cramfs_load (char *loadoffset, struct part_info *info, char *filename)
+{
+ unsigned long offset;
+
+ if (cramfs_read_super (info))
+ return -1;
+
+ offset = cramfs_resolve (PART_OFFSET(info),
+ CRAMFS_GET_OFFSET (&(super.root)) << 2,
+ CRAMFS_24 (super.root.size), 0,
+ strtok (filename, "/"));
+
+ if (offset <= 0)
+ return offset;
+
+ return cramfs_uncompress (PART_OFFSET(info), offset,
+ (unsigned long) loadoffset);
+}
+
+static int cramfs_list_inode (struct part_info *info, unsigned long offset)
+{
+ struct cramfs_inode *inode = (struct cramfs_inode *)
+ (PART_OFFSET(info) + offset);
+ char *name, str[20];
+ int namelen, nextoff;
+
+ /*
+ * Namelengths on disk are shifted by two
+ * and the name padded out to 4-byte boundaries
+ * with zeroes.
+ */
+ namelen = CRAMFS_GET_NAMELEN (inode) << 2;
+ name = (char *) inode + sizeof (struct cramfs_inode);
+ nextoff = namelen;
+
+ for (;;) {
+ if (!namelen)
+ return namelen;
+ if (name[namelen - 1])
+ break;
+ namelen--;
+ }
+
+ printf (" %s %8d %*.*s", mkmodestr (CRAMFS_16 (inode->mode), str),
+ CRAMFS_24 (inode->size), namelen, namelen, name);
+
+ if ((CRAMFS_16 (inode->mode) & S_IFMT) == S_IFLNK) {
+ /* symbolic link.
+ * Unpack the link target, trusting in the inode's size field.
+ */
+ unsigned long size = CRAMFS_24 (inode->size);
+ char *link = malloc (size);
+
+ if (link != NULL && cramfs_uncompress (PART_OFFSET(info), offset,
+ (unsigned long) link)
+ == size)
+ printf (" -> %*.*s\n", (int) size, (int) size, link);
+ else
+ printf (" [Error reading link]\n");
+ if (link)
+ free (link);
+ } else
+ printf ("\n");
+
+ return nextoff;
+}
+
+int cramfs_ls (struct part_info *info, char *filename)
+{
+ struct cramfs_inode *inode;
+ unsigned long inodeoffset = 0, nextoffset;
+ unsigned long offset, size;
+
+ if (cramfs_read_super (info))
+ return -1;
+
+ if (strlen (filename) == 0 || !strcmp (filename, "/")) {
+ /* Root directory. Use root inode in super block */
+ offset = CRAMFS_GET_OFFSET (&(super.root)) << 2;
+ size = CRAMFS_24 (super.root.size);
+ } else {
+ /* Resolve the path */
+ offset = cramfs_resolve (PART_OFFSET(info),
+ CRAMFS_GET_OFFSET (&(super.root)) <<
+ 2, CRAMFS_24 (super.root.size), 1,
+ strtok (filename, "/"));
+
+ if (offset <= 0)
+ return offset;
+
+ /* Resolving was successful. Examine the inode */
+ inode = (struct cramfs_inode *) (PART_OFFSET(info) + offset);
+ if (!S_ISDIR (CRAMFS_16 (inode->mode))) {
+ /* It's not a directory - list it, and that's that */
+ return (cramfs_list_inode (info, offset) > 0);
+ }
+
+ /* It's a directory. List files within */
+ offset = CRAMFS_GET_OFFSET (inode) << 2;
+ size = CRAMFS_24 (inode->size);
+ }
+
+ /* List the given directory */
+ while (inodeoffset < size) {
+ inode = (struct cramfs_inode *) (PART_OFFSET(info) + offset +
+ inodeoffset);
+
+ nextoffset = cramfs_list_inode (info, offset + inodeoffset);
+ if (nextoffset == 0)
+ break;
+ inodeoffset += sizeof (struct cramfs_inode) + nextoffset;
+ }
+
+ return 1;
+}
+
+int cramfs_info (struct part_info *info)
+{
+ if (cramfs_read_super (info))
+ return 0;
+
+ printf ("size: 0x%x (%u)\n", super.size, super.size);
+
+ if (super.flags != 0) {
+ printf ("flags:\n");
+ if (super.flags & CRAMFS_FLAG_FSID_VERSION_2)
+ printf ("\tFSID version 2\n");
+ if (super.flags & CRAMFS_FLAG_SORTED_DIRS)
+ printf ("\tsorted dirs\n");
+ if (super.flags & CRAMFS_FLAG_HOLES)
+ printf ("\tholes\n");
+ if (super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET)
+ printf ("\tshifted root offset\n");
+ }
+
+ printf ("fsid:\n\tcrc: 0x%x\n\tedition: 0x%x\n",
+ super.fsid.crc, super.fsid.edition);
+ printf ("name: %16s\n", super.name);
+
+ return 1;
+}
+
+int cramfs_check (struct part_info *info)
+{
+ struct cramfs_super *sb;
+
+ if (info->dev->id->type != MTD_DEV_TYPE_NOR)
+ return 0;
+
+ sb = (struct cramfs_super *) PART_OFFSET(info);
+ if (sb->magic != CRAMFS_32 (CRAMFS_MAGIC)) {
+ /* check at 512 byte offset */
+ sb = (struct cramfs_super *) (PART_OFFSET(info) + 512);
+ if (sb->magic != CRAMFS_32 (CRAMFS_MAGIC))
+ return 0;
+ }
+ return 1;
+}
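Editorial note: a short usage sketch for the routines above, as they might be driven from a command handler. The partition pointer and path are assumptions; locating the partition (e.g. via the mtdparts machinery) is outside this file.

#include <common.h>
#include <jffs2/load_kernel.h>

/* Prototypes as defined in cramfs.c above. */
extern int cramfs_check(struct part_info *info);
extern int cramfs_info(struct part_info *info);
extern int cramfs_ls(struct part_info *info, char *filename);
extern int cramfs_load(char *loadoffset, struct part_info *info, char *filename);

static int cramfs_demo(struct part_info *part, ulong load_addr)
{
	char root[] = "/";
	char path[] = "/boot/uImage";	/* cramfs_load() tokenises the path in place */

	if (!cramfs_check(part)) {
		printf("no cramfs magic found in partition\n");
		return -1;
	}

	cramfs_info(part);		/* dump superblock size, flags, fsid */
	cramfs_ls(part, root);		/* list the root directory */

	/* decompress the file contents to load_addr; returns bytes written */
	return cramfs_load((char *)load_addr, part, path);
}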
diff --git a/qemu/roms/u-boot/fs/cramfs/uncompress.c b/qemu/roms/u-boot/fs/cramfs/uncompress.c
new file mode 100644
index 000000000..f431cc46c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/cramfs/uncompress.c
@@ -0,0 +1,83 @@
+/*
+ * uncompress.c
+ *
+ * Copyright (C) 1999 Linus Torvalds
+ * Copyright (C) 2000-2002 Transmeta Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * cramfs interfaces to the uncompression library. There are really just
+ * three entrypoints:
+ *
+ * - cramfs_uncompress_init() - called to initialize the thing.
+ * - cramfs_uncompress_exit() - tell me when you're done
+ * - cramfs_uncompress_block() - uncompress a block.
+ *
+ * NOTE NOTE NOTE! The uncompression is entirely single-threaded. We
+ * only have one stream, and we'll initialize it only once even if it
+ * then is used by multiple filesystems.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <watchdog.h>
+#include <u-boot/zlib.h>
+
+static z_stream stream;
+
+/* Returns length of decompressed data. */
+int cramfs_uncompress_block (void *dst, void *src, int srclen)
+{
+ int err;
+
+ inflateReset (&stream);
+
+ stream.next_in = src;
+ stream.avail_in = srclen;
+
+ stream.next_out = dst;
+ stream.avail_out = 4096 * 2;
+
+ err = inflate (&stream, Z_FINISH);
+
+ if (err != Z_STREAM_END)
+ goto err;
+ return stream.total_out;
+
+ err:
+ /*printf ("Error %d while decompressing!\n", err); */
+ /*printf ("%p(%d)->%p\n", src, srclen, dst); */
+ return -1;
+}
+
+int cramfs_uncompress_init (void)
+{
+ int err;
+
+ stream.zalloc = gzalloc;
+ stream.zfree = gzfree;
+ stream.next_in = 0;
+ stream.avail_in = 0;
+
+#if defined(CONFIG_HW_WATCHDOG) || defined(CONFIG_WATCHDOG)
+ stream.outcb = (cb_func) WATCHDOG_RESET;
+#else
+ stream.outcb = Z_NULL;
+#endif /* CONFIG_HW_WATCHDOG */
+
+ err = inflateInit (&stream);
+ if (err != Z_OK) {
+ printf ("Error: inflateInit2() returned %d\n", err);
+ return -1;
+ }
+
+ return 0;
+}
+
+int cramfs_uncompress_exit (void)
+{
+ inflateEnd (&stream);
+ return 0;
+}
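Editorial note: to make the call sequence concrete, a minimal sketch of driving the three entry points described in the header comment above; dst, src and srclen are placeholders for one compressed cramfs block.

/* Prototypes as defined above. */
extern int cramfs_uncompress_init(void);
extern int cramfs_uncompress_block(void *dst, void *src, int srclen);
extern int cramfs_uncompress_exit(void);

static int unpack_one_block(void *dst, void *src, int srclen)
{
	int n;

	cramfs_uncompress_init();	/* set up the single shared zlib stream */
	n = cramfs_uncompress_block(dst, src, srclen);
	cramfs_uncompress_exit();	/* tear the stream down again */

	return n;			/* decompressed length, or -1 on error */
}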
diff --git a/qemu/roms/u-boot/fs/ext4/Makefile b/qemu/roms/u-boot/fs/ext4/Makefile
new file mode 100644
index 000000000..8d15bdad6
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/Makefile
@@ -0,0 +1,13 @@
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# (C) Copyright 2003
+# Pavel Bartusek, Sysgo Real-Time Solutions AG, pba@sysgo.de
+#
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := ext4fs.o ext4_common.o dev.o
+obj-$(CONFIG_EXT4_WRITE) += ext4_write.o ext4_journal.o crc16.o
diff --git a/qemu/roms/u-boot/fs/ext4/crc16.c b/qemu/roms/u-boot/fs/ext4/crc16.c
new file mode 100644
index 000000000..3afb34dae
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/crc16.c
@@ -0,0 +1,62 @@
+/*
+ * crc16.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <common.h>
+#include <asm/byteorder.h>
+#include <linux/stat.h>
+#include "crc16.h"
+
+/** CRC table for the CRC-16. The poly is 0x8005 (x16 + x15 + x2 + 1) */
+static __u16 const crc16_table[256] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+
+/**
+ * Compute the CRC-16 for the data buffer
+*/
+
+unsigned int ext2fs_crc16(unsigned int crc,
+ const void *buffer, unsigned int len)
+{
+ const unsigned char *cp = buffer;
+
+ while (len--)
+ crc = (((crc >> 8) & 0xffU) ^
+ crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU;
+ return crc;
+}
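Editorial note: the routine above is meant to be chained call by call, as the group-descriptor checksum in ext4_common.c (later in this commit) does. The helper below is only an illustrative rearrangement of that pattern; the names are not part of this commit.

#include "crc16.h"

static unsigned int group_desc_crc(const unsigned char *uuid, unsigned int group,
				   const void *desc, unsigned int len)
{
	unsigned int crc;

	crc = ext2fs_crc16(~0, uuid, 16);		/* seed with the filesystem UUID */
	crc = ext2fs_crc16(crc, &group, sizeof(group));	/* mix in the group number */
	crc = ext2fs_crc16(crc, desc, len);		/* then the descriptor bytes */

	return crc & 0xffff;
}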
diff --git a/qemu/roms/u-boot/fs/ext4/crc16.h b/qemu/roms/u-boot/fs/ext4/crc16.h
new file mode 100644
index 000000000..5fd113a56
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/crc16.h
@@ -0,0 +1,16 @@
+/*
+ * crc16.h - CRC-16 routine
+ * Implements the standard CRC-16:
+ * Width 16
+ * Poly 0x8005 (x16 + x15 + x2 + 1)
+ * Init 0
+ *
+ * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#ifndef __CRC16_H
+#define __CRC16_H
+extern unsigned int ext2fs_crc16(unsigned int crc,
+ const void *buffer, unsigned int len);
+#endif
diff --git a/qemu/roms/u-boot/fs/ext4/dev.c b/qemu/roms/u-boot/fs/ext4/dev.c
new file mode 100644
index 000000000..e0b513a4e
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/dev.c
@@ -0,0 +1,146 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * made from existing ext2/dev.c file of Uboot
+ * (C) Copyright 2004
+ * esd gmbh <www.esd-electronics.com>
+ * Reinhard Arlt <reinhard.arlt@esd-electronics.com>
+ *
+ * based on code of fs/reiserfs/dev.c by
+ *
+ * (C) Copyright 2003 - 2004
+ * Sysgo AG, <www.elinos.com>, Pavel Bartusek <pba@sysgo.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/*
+ * Changelog:
+ * 0.1 - Newly created file for ext4fs support. Taken from
+ * fs/ext2/dev.c file in uboot.
+ */
+
+#include <common.h>
+#include <config.h>
+#include <ext4fs.h>
+#include <ext_common.h>
+#include "ext4_common.h"
+
+lbaint_t part_offset;
+
+static block_dev_desc_t *ext4fs_block_dev_desc;
+static disk_partition_t *part_info;
+
+void ext4fs_set_blk_dev(block_dev_desc_t *rbdd, disk_partition_t *info)
+{
+ assert(rbdd->blksz == (1 << rbdd->log2blksz));
+ ext4fs_block_dev_desc = rbdd;
+ get_fs()->dev_desc = rbdd;
+ part_info = info;
+ part_offset = info->start;
+ get_fs()->total_sect = ((uint64_t)info->size * info->blksz) >>
+ get_fs()->dev_desc->log2blksz;
+}
+
+int ext4fs_devread(lbaint_t sector, int byte_offset, int byte_len, char *buf)
+{
+ unsigned block_len;
+ int log2blksz = ext4fs_block_dev_desc->log2blksz;
+ ALLOC_CACHE_ALIGN_BUFFER(char, sec_buf, (ext4fs_block_dev_desc ?
+ ext4fs_block_dev_desc->blksz :
+ 0));
+ if (ext4fs_block_dev_desc == NULL) {
+ printf("** Invalid Block Device Descriptor (NULL)\n");
+ return 0;
+ }
+
+ /* Check partition boundaries */
+ if ((sector < 0) ||
+ ((sector + ((byte_offset + byte_len - 1) >> log2blksz))
+ >= part_info->size)) {
+ printf("%s read outside partition " LBAFU "\n", __func__,
+ sector);
+ return 0;
+ }
+
+ /* Fold whole sectors of the byte offset into the sector number */
+ sector += byte_offset >> log2blksz;
+ byte_offset &= ext4fs_block_dev_desc->blksz - 1;
+
+ debug(" <" LBAFU ", %d, %d>\n", sector, byte_offset, byte_len);
+
+ if (byte_offset != 0) {
+ /* read first part which isn't aligned with start of sector */
+ if (ext4fs_block_dev_desc->
+ block_read(ext4fs_block_dev_desc->dev,
+ part_info->start + sector, 1,
+ (unsigned long *) sec_buf) != 1) {
+ printf(" ** ext2fs_devread() read error **\n");
+ return 0;
+ }
+ memcpy(buf, sec_buf + byte_offset,
+ min(ext4fs_block_dev_desc->blksz
+ - byte_offset, byte_len));
+ buf += min(ext4fs_block_dev_desc->blksz
+ - byte_offset, byte_len);
+ byte_len -= min(ext4fs_block_dev_desc->blksz
+ - byte_offset, byte_len);
+ sector++;
+ }
+
+ if (byte_len == 0)
+ return 1;
+
+ /* read sector aligned part */
+ block_len = byte_len & ~(ext4fs_block_dev_desc->blksz - 1);
+
+ if (block_len == 0) {
+ ALLOC_CACHE_ALIGN_BUFFER(u8, p, ext4fs_block_dev_desc->blksz);
+
+ block_len = ext4fs_block_dev_desc->blksz;
+ ext4fs_block_dev_desc->block_read(ext4fs_block_dev_desc->dev,
+ part_info->start + sector,
+ 1, (unsigned long *)p);
+ memcpy(buf, p, byte_len);
+ return 1;
+ }
+
+ if (ext4fs_block_dev_desc->block_read(ext4fs_block_dev_desc->dev,
+ part_info->start + sector,
+ block_len >> log2blksz,
+ (unsigned long *) buf) !=
+ block_len >> log2blksz) {
+ printf(" ** %s read error - block\n", __func__);
+ return 0;
+ }
+ block_len = byte_len & ~(ext4fs_block_dev_desc->blksz - 1);
+ buf += block_len;
+ byte_len -= block_len;
+ sector += block_len / ext4fs_block_dev_desc->blksz;
+
+ if (byte_len != 0) {
+ /* read rest of data which are not in whole sector */
+ if (ext4fs_block_dev_desc->
+ block_read(ext4fs_block_dev_desc->dev,
+ part_info->start + sector, 1,
+ (unsigned long *) sec_buf) != 1) {
+ printf("* %s read error - last part\n", __func__);
+ return 0;
+ }
+ memcpy(buf, sec_buf, byte_len);
+ }
+ return 1;
+}
+
+int ext4_read_superblock(char *buffer)
+{
+ struct ext_filesystem *fs = get_fs();
+ int sect = SUPERBLOCK_START >> fs->dev_desc->log2blksz;
+ int off = SUPERBLOCK_START % fs->dev_desc->blksz;
+
+ return ext4fs_devread(sect, off, SUPERBLOCK_SIZE,
+ buffer);
+}
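Editorial note: as a worked example of the head/body/tail split that ext4fs_devread() above performs, the stand-alone sketch below only computes the three pieces for a hypothetical 512-byte-block device and does no I/O.

#include <stdio.h>

int main(void)
{
	const unsigned int blksz = 512, log2blksz = 9;
	unsigned long sector = 10;		/* sector argument of the request */
	int byte_offset = 100, byte_len = 2000;	/* unaligned offset and length */

	sector += byte_offset >> log2blksz;	/* still sector 10 here */
	byte_offset &= blksz - 1;		/* 100 bytes into that sector */

	int head = blksz - byte_offset;			/* 412 bytes via a bounce buffer */
	int body = (byte_len - head) & ~(blksz - 1);	/* 1536 bytes = 3 whole sectors */
	int tail = byte_len - head - body;		/* 52 bytes, again via bounce buffer */

	printf("sector=%lu head=%d body=%d tail=%d\n", sector, head, body, tail);
	return 0;
}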
diff --git a/qemu/roms/u-boot/fs/ext4/ext4_common.c b/qemu/roms/u-boot/fs/ext4/ext4_common.c
new file mode 100644
index 000000000..1c1172163
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/ext4_common.c
@@ -0,0 +1,2250 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * ext4ls and ext4load : Based on ext2 ls load support in Uboot.
+ *
+ * (C) Copyright 2004
+ * esd gmbh <www.esd-electronics.com>
+ * Reinhard Arlt <reinhard.arlt@esd-electronics.com>
+ *
+ * based on code from grub2 fs/ext2.c and fs/fshelp.c by
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ *
+ * ext4write : Based on generic ext4 protocol.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <ext_common.h>
+#include <ext4fs.h>
+#include <malloc.h>
+#include <stddef.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <asm/byteorder.h>
+#include "ext4_common.h"
+
+struct ext2_data *ext4fs_root;
+struct ext2fs_node *ext4fs_file;
+uint32_t *ext4fs_indir1_block;
+int ext4fs_indir1_size;
+int ext4fs_indir1_blkno = -1;
+uint32_t *ext4fs_indir2_block;
+int ext4fs_indir2_size;
+int ext4fs_indir2_blkno = -1;
+
+uint32_t *ext4fs_indir3_block;
+int ext4fs_indir3_size;
+int ext4fs_indir3_blkno = -1;
+struct ext2_inode *g_parent_inode;
+static int symlinknest;
+
+#if defined(CONFIG_EXT4_WRITE)
+uint32_t ext4fs_div_roundup(uint32_t size, uint32_t n)
+{
+ uint32_t res = size / n;
+ if (res * n != size)
+ res++;
+
+ return res;
+}
+
+void put_ext4(uint64_t off, void *buf, uint32_t size)
+{
+ uint64_t startblock;
+ uint64_t remainder;
+ unsigned char *temp_ptr = NULL;
+ struct ext_filesystem *fs = get_fs();
+ int log2blksz = fs->dev_desc->log2blksz;
+ ALLOC_CACHE_ALIGN_BUFFER(unsigned char, sec_buf, fs->dev_desc->blksz);
+
+ startblock = off >> log2blksz;
+ startblock += part_offset;
+ remainder = off & (uint64_t)(fs->dev_desc->blksz - 1);
+
+ if (fs->dev_desc == NULL)
+ return;
+
+ if ((startblock + (size >> log2blksz)) >
+ (part_offset + fs->total_sect)) {
+ printf("part_offset is " LBAFU "\n", part_offset);
+ printf("total_sector is %llu\n", fs->total_sect);
+ printf("error: overflow occurs\n");
+ return;
+ }
+
+ if (remainder) {
+ if (fs->dev_desc->block_read) {
+ fs->dev_desc->block_read(fs->dev_desc->dev,
+ startblock, 1, sec_buf);
+ temp_ptr = sec_buf;
+ memcpy((temp_ptr + remainder),
+ (unsigned char *)buf, size);
+ fs->dev_desc->block_write(fs->dev_desc->dev,
+ startblock, 1, sec_buf);
+ }
+ } else {
+ if (size >> log2blksz != 0) {
+ fs->dev_desc->block_write(fs->dev_desc->dev,
+ startblock,
+ size >> log2blksz,
+ (unsigned long *)buf);
+ } else {
+ fs->dev_desc->block_read(fs->dev_desc->dev,
+ startblock, 1, sec_buf);
+ temp_ptr = sec_buf;
+ memcpy(temp_ptr, buf, size);
+ fs->dev_desc->block_write(fs->dev_desc->dev,
+ startblock, 1,
+ (unsigned long *)sec_buf);
+ }
+ }
+}
+
+static int _get_new_inode_no(unsigned char *buffer)
+{
+ struct ext_filesystem *fs = get_fs();
+ unsigned char input;
+ int operand, status;
+ int count = 1;
+ int j = 0;
+
+ /* get the blocksize of the filesystem */
+ unsigned char *ptr = buffer;
+ while (*ptr == 255) {
+ ptr++;
+ count += 8;
+ if (count > ext4fs_root->sblock.inodes_per_group)
+ return -1;
+ }
+
+ for (j = 0; j < fs->blksz; j++) {
+ input = *ptr;
+ int i = 0;
+ while (i <= 7) {
+ operand = 1 << i;
+ status = input & operand;
+ if (status) {
+ i++;
+ count++;
+ } else {
+ *ptr |= operand;
+ return count;
+ }
+ }
+ ptr = ptr + 1;
+ }
+
+ return -1;
+}
+
+static int _get_new_blk_no(unsigned char *buffer)
+{
+ unsigned char input;
+ int operand, status;
+ int count = 0;
+ int j = 0;
+ unsigned char *ptr = buffer;
+ struct ext_filesystem *fs = get_fs();
+
+ if (fs->blksz != 1024)
+ count = 0;
+ else
+ count = 1;
+
+ while (*ptr == 255) {
+ ptr++;
+ count += 8;
+ if (count == (fs->blksz * 8))
+ return -1;
+ }
+
+ for (j = 0; j < fs->blksz; j++) {
+ input = *ptr;
+ int i = 0;
+ while (i <= 7) {
+ operand = 1 << i;
+ status = input & operand;
+ if (status) {
+ i++;
+ count++;
+ } else {
+ *ptr |= operand;
+ return count;
+ }
+ }
+ ptr = ptr + 1;
+ }
+
+ return -1;
+}
+
+int ext4fs_set_block_bmap(long int blockno, unsigned char *buffer, int index)
+{
+ int i, remainder, status;
+ unsigned char *ptr = buffer;
+ unsigned char operand;
+ i = blockno / 8;
+ remainder = blockno % 8;
+ int blocksize = EXT2_BLOCK_SIZE(ext4fs_root);
+
+ i = i - (index * blocksize);
+ if (blocksize != 1024) {
+ ptr = ptr + i;
+ operand = 1 << remainder;
+ status = *ptr & operand;
+ if (status)
+ return -1;
+
+ *ptr = *ptr | operand;
+ return 0;
+ } else {
+ if (remainder == 0) {
+ ptr = ptr + i - 1;
+ operand = (1 << 7);
+ } else {
+ ptr = ptr + i;
+ operand = (1 << (remainder - 1));
+ }
+ status = *ptr & operand;
+ if (status)
+ return -1;
+
+ *ptr = *ptr | operand;
+ return 0;
+ }
+}
+
+void ext4fs_reset_block_bmap(long int blockno, unsigned char *buffer, int index)
+{
+ int i, remainder, status;
+ unsigned char *ptr = buffer;
+ unsigned char operand;
+ i = blockno / 8;
+ remainder = blockno % 8;
+ int blocksize = EXT2_BLOCK_SIZE(ext4fs_root);
+
+ i = i - (index * blocksize);
+ if (blocksize != 1024) {
+ ptr = ptr + i;
+ operand = (1 << remainder);
+ status = *ptr & operand;
+ if (status)
+ *ptr = *ptr & ~(operand);
+ } else {
+ if (remainder == 0) {
+ ptr = ptr + i - 1;
+ operand = (1 << 7);
+ } else {
+ ptr = ptr + i;
+ operand = (1 << (remainder - 1));
+ }
+ status = *ptr & operand;
+ if (status)
+ *ptr = *ptr & ~(operand);
+ }
+}
+
+int ext4fs_set_inode_bmap(int inode_no, unsigned char *buffer, int index)
+{
+ int i, remainder, status;
+ unsigned char *ptr = buffer;
+ unsigned char operand;
+
+ inode_no -= (index * ext4fs_root->sblock.inodes_per_group);
+ i = inode_no / 8;
+ remainder = inode_no % 8;
+ if (remainder == 0) {
+ ptr = ptr + i - 1;
+ operand = (1 << 7);
+ } else {
+ ptr = ptr + i;
+ operand = (1 << (remainder - 1));
+ }
+ status = *ptr & operand;
+ if (status)
+ return -1;
+
+ *ptr = *ptr | operand;
+
+ return 0;
+}
+
+void ext4fs_reset_inode_bmap(int inode_no, unsigned char *buffer, int index)
+{
+ int i, remainder, status;
+ unsigned char *ptr = buffer;
+ unsigned char operand;
+
+ inode_no -= (index * ext4fs_root->sblock.inodes_per_group);
+ i = inode_no / 8;
+ remainder = inode_no % 8;
+ if (remainder == 0) {
+ ptr = ptr + i - 1;
+ operand = (1 << 7);
+ } else {
+ ptr = ptr + i;
+ operand = (1 << (remainder - 1));
+ }
+ status = *ptr & operand;
+ if (status)
+ *ptr = *ptr & ~(operand);
+}
+
+int ext4fs_checksum_update(unsigned int i)
+{
+ struct ext2_block_group *desc;
+ struct ext_filesystem *fs = get_fs();
+ __u16 crc = 0;
+
+ desc = (struct ext2_block_group *)&fs->bgd[i];
+ if (fs->sb->feature_ro_compat & EXT4_FEATURE_RO_COMPAT_GDT_CSUM) {
+ int offset = offsetof(struct ext2_block_group, bg_checksum);
+
+ crc = ext2fs_crc16(~0, fs->sb->unique_id,
+ sizeof(fs->sb->unique_id));
+ crc = ext2fs_crc16(crc, &i, sizeof(i));
+ crc = ext2fs_crc16(crc, desc, offset);
+ offset += sizeof(desc->bg_checksum); /* skip checksum */
+ assert(offset == sizeof(*desc));
+ }
+
+ return crc;
+}
+
+static int check_void_in_dentry(struct ext2_dirent *dir, char *filename)
+{
+ int dentry_length;
+ int sizeof_void_space;
+ int new_entry_byte_reqd;
+ short padding_factor = 0;
+
+ if (dir->namelen % 4 != 0)
+ padding_factor = 4 - (dir->namelen % 4);
+
+ dentry_length = sizeof(struct ext2_dirent) +
+ dir->namelen + padding_factor;
+ sizeof_void_space = dir->direntlen - dentry_length;
+ if (sizeof_void_space == 0)
+ return 0;
+
+ padding_factor = 0;
+ if (strlen(filename) % 4 != 0)
+ padding_factor = 4 - (strlen(filename) % 4);
+
+ new_entry_byte_reqd = strlen(filename) +
+ sizeof(struct ext2_dirent) + padding_factor;
+ if (sizeof_void_space >= new_entry_byte_reqd) {
+ dir->direntlen = dentry_length;
+ return sizeof_void_space;
+ }
+
+ return 0;
+}
+
+void ext4fs_update_parent_dentry(char *filename, int *p_ino, int file_type)
+{
+ unsigned int *zero_buffer = NULL;
+ char *root_first_block_buffer = NULL;
+ int direct_blk_idx;
+ long int root_blknr;
+ long int first_block_no_of_root = 0;
+ long int previous_blknr = -1;
+ int totalbytes = 0;
+ short int padding_factor = 0;
+ unsigned int new_entry_byte_reqd;
+ unsigned int last_entry_dirlen;
+ int sizeof_void_space = 0;
+ int templength = 0;
+ int inodeno;
+ int status;
+ struct ext_filesystem *fs = get_fs();
+ /* directory entry */
+ struct ext2_dirent *dir;
+ char *temp_dir = NULL;
+
+ zero_buffer = zalloc(fs->blksz);
+ if (!zero_buffer) {
+ printf("No Memory\n");
+ return;
+ }
+ root_first_block_buffer = zalloc(fs->blksz);
+ if (!root_first_block_buffer) {
+ free(zero_buffer);
+ printf("No Memory\n");
+ return;
+ }
+restart:
+
+ /* read the block no allocated to a file */
+ for (direct_blk_idx = 0; direct_blk_idx < INDIRECT_BLOCKS;
+ direct_blk_idx++) {
+ root_blknr = read_allocated_block(g_parent_inode,
+ direct_blk_idx);
+ if (root_blknr == 0) {
+ first_block_no_of_root = previous_blknr;
+ break;
+ }
+ previous_blknr = root_blknr;
+ }
+
+ status = ext4fs_devread((lbaint_t)first_block_no_of_root
+ * fs->sect_perblk,
+ 0, fs->blksz, root_first_block_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(root_first_block_buffer, first_block_no_of_root))
+ goto fail;
+ dir = (struct ext2_dirent *)root_first_block_buffer;
+ totalbytes = 0;
+ while (dir->direntlen > 0) {
+ /*
+ * blocksize - totalbytes equals dir->direntlen only
+ * for the last directory entry, whose direntlen also
+ * covers the free space remaining in the block
+ * (i.e. it is the last entry in this block)
+ */
+
+ /* traverse each directory entry */
+ if (fs->blksz - totalbytes == dir->direntlen) {
+ if (strlen(filename) % 4 != 0)
+ padding_factor = 4 - (strlen(filename) % 4);
+
+ new_entry_byte_reqd = strlen(filename) +
+ sizeof(struct ext2_dirent) + padding_factor;
+ padding_factor = 0;
+ /*
+ * update last directory entry length to its
+ * length because we are creating new directory
+ * entry
+ */
+ if (dir->namelen % 4 != 0)
+ padding_factor = 4 - (dir->namelen % 4);
+
+ last_entry_dirlen = dir->namelen +
+ sizeof(struct ext2_dirent) + padding_factor;
+ if ((fs->blksz - totalbytes - last_entry_dirlen) <
+ new_entry_byte_reqd) {
+ printf("1st Block Full:Allocate new block\n");
+
+ if (direct_blk_idx == INDIRECT_BLOCKS - 1) {
+ printf("Directory exceeds limit\n");
+ goto fail;
+ }
+ g_parent_inode->b.blocks.dir_blocks
+ [direct_blk_idx] = ext4fs_get_new_blk_no();
+ if (g_parent_inode->b.blocks.dir_blocks
+ [direct_blk_idx] == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ put_ext4(((uint64_t)
+ ((uint64_t)g_parent_inode->b.
+ blocks.dir_blocks[direct_blk_idx] *
+ (uint64_t)fs->blksz)), zero_buffer, fs->blksz);
+ g_parent_inode->size =
+ g_parent_inode->size + fs->blksz;
+ g_parent_inode->blockcnt =
+ g_parent_inode->blockcnt + fs->sect_perblk;
+ if (ext4fs_put_metadata
+ (root_first_block_buffer,
+ first_block_no_of_root))
+ goto fail;
+ goto restart;
+ }
+ dir->direntlen = last_entry_dirlen;
+ break;
+ }
+
+ templength = dir->direntlen;
+ totalbytes = totalbytes + templength;
+ sizeof_void_space = check_void_in_dentry(dir, filename);
+ if (sizeof_void_space)
+ break;
+
+ dir = (struct ext2_dirent *)((char *)dir + templength);
+ }
+
+ /* make a pointer ready for creating next directory entry */
+ templength = dir->direntlen;
+ totalbytes = totalbytes + templength;
+ dir = (struct ext2_dirent *)((char *)dir + templength);
+
+ /* get the next available inode number */
+ inodeno = ext4fs_get_new_inode_no();
+ if (inodeno == -1) {
+ printf("no inode left to assign\n");
+ goto fail;
+ }
+ dir->inode = inodeno;
+ if (sizeof_void_space)
+ dir->direntlen = sizeof_void_space;
+ else
+ dir->direntlen = fs->blksz - totalbytes;
+
+ dir->namelen = strlen(filename);
+ dir->filetype = FILETYPE_REG; /* regular file */
+ temp_dir = (char *)dir;
+ temp_dir = temp_dir + sizeof(struct ext2_dirent);
+ memcpy(temp_dir, filename, strlen(filename));
+
+ *p_ino = inodeno;
+
+ /* update or write the 1st block of root inode */
+ if (ext4fs_put_metadata(root_first_block_buffer,
+ first_block_no_of_root))
+ goto fail;
+
+fail:
+ free(zero_buffer);
+ free(root_first_block_buffer);
+}
+
+static int search_dir(struct ext2_inode *parent_inode, char *dirname)
+{
+ int status;
+ int inodeno;
+ int totalbytes;
+ int templength;
+ int direct_blk_idx;
+ long int blknr;
+ int found = 0;
+ char *ptr = NULL;
+ unsigned char *block_buffer = NULL;
+ struct ext2_dirent *dir = NULL;
+ struct ext2_dirent *previous_dir = NULL;
+ struct ext_filesystem *fs = get_fs();
+
+ /* read the block no allocated to a file */
+ for (direct_blk_idx = 0; direct_blk_idx < INDIRECT_BLOCKS;
+ direct_blk_idx++) {
+ blknr = read_allocated_block(parent_inode, direct_blk_idx);
+ if (blknr == 0)
+ goto fail;
+
+ /* read the blocks of the parent inode */
+ block_buffer = zalloc(fs->blksz);
+ if (!block_buffer)
+ goto fail;
+
+ status = ext4fs_devread((lbaint_t)blknr * fs->sect_perblk,
+ 0, fs->blksz, (char *)block_buffer);
+ if (status == 0)
+ goto fail;
+
+ dir = (struct ext2_dirent *)block_buffer;
+ ptr = (char *)dir;
+ totalbytes = 0;
+ while (dir->direntlen >= 0) {
+ /*
+ * blocksize - totalbytes equals dir->direntlen
+ * only for the last directory entry, whose
+ * direntlen also covers the free space
+ * remaining in the block
+ */
+ if (strlen(dirname) == dir->namelen) {
+ if (strncmp(dirname, ptr +
+ sizeof(struct ext2_dirent),
+ dir->namelen) == 0) {
+ previous_dir->direntlen +=
+ dir->direntlen;
+ inodeno = dir->inode;
+ dir->inode = 0;
+ found = 1;
+ break;
+ }
+ }
+
+ if (fs->blksz - totalbytes == dir->direntlen)
+ break;
+
+ /* traverse each directory entry */
+ templength = dir->direntlen;
+ totalbytes = totalbytes + templength;
+ previous_dir = dir;
+ dir = (struct ext2_dirent *)((char *)dir + templength);
+ ptr = (char *)dir;
+ }
+
+ if (found == 1) {
+ free(block_buffer);
+ block_buffer = NULL;
+ return inodeno;
+ }
+
+ free(block_buffer);
+ block_buffer = NULL;
+ }
+
+fail:
+ free(block_buffer);
+
+ return -1;
+}
+
+static int find_dir_depth(char *dirname)
+{
+ char *token = strtok(dirname, "/");
+ int count = 0;
+ while (token != NULL) {
+ token = strtok(NULL, "/");
+ count++;
+ }
+ return count + 1 + 1;
+ /*
+ * for example, for the string /home/temp:
+ * depth = root(1) + home(1) + temp(1) + 1 for NULL,
+ * so count is 4
+ */
+}
+
+static int parse_path(char **arr, char *dirname)
+{
+ char *token = strtok(dirname, "/");
+ int i = 0;
+
+ /* add root */
+ arr[i] = zalloc(strlen("/") + 1);
+ if (!arr[i])
+ return -ENOMEM;
+
+ arr[i++] = "/";
+
+ /* add each path entry after root */
+ while (token != NULL) {
+ arr[i] = zalloc(strlen(token) + 1);
+ if (!arr[i])
+ return -ENOMEM;
+ memcpy(arr[i++], token, strlen(token));
+ token = strtok(NULL, "/");
+ }
+ arr[i] = NULL;
+
+ return 0;
+}
+
+int ext4fs_iget(int inode_no, struct ext2_inode *inode)
+{
+ if (ext4fs_read_inode(ext4fs_root, inode_no, inode) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Function: ext4fs_get_parent_inode_num
+ * Return Value: inode Number of the parent directory of file/Directory to be
+ * created
+ * dirname : Input parameter, input path name of the file/directory to be created
+ * dname : Output parameter, to be filled with the name of the directory
+ * extracted from dirname
+ */
+int ext4fs_get_parent_inode_num(const char *dirname, char *dname, int flags)
+{
+ int i;
+ int depth = 0;
+ int matched_inode_no;
+ int result_inode_no = -1;
+ char **ptr = NULL;
+ char *depth_dirname = NULL;
+ char *parse_dirname = NULL;
+ struct ext2_inode *parent_inode = NULL;
+ struct ext2_inode *first_inode = NULL;
+ struct ext2_inode temp_inode;
+
+ if (*dirname != '/') {
+ printf("Please supply Absolute path\n");
+ return -1;
+ }
+
+ /* TODO: input validation make equivalent to linux */
+ depth_dirname = zalloc(strlen(dirname) + 1);
+ if (!depth_dirname)
+ return -ENOMEM;
+
+ memcpy(depth_dirname, dirname, strlen(dirname));
+ depth = find_dir_depth(depth_dirname);
+ parse_dirname = zalloc(strlen(dirname) + 1);
+ if (!parse_dirname)
+ goto fail;
+ memcpy(parse_dirname, dirname, strlen(dirname));
+
+ /* allocate memory for each directory level */
+ ptr = zalloc((depth) * sizeof(char *));
+ if (!ptr)
+ goto fail;
+ if (parse_path(ptr, parse_dirname))
+ goto fail;
+ parent_inode = zalloc(sizeof(struct ext2_inode));
+ if (!parent_inode)
+ goto fail;
+ first_inode = zalloc(sizeof(struct ext2_inode));
+ if (!first_inode)
+ goto fail;
+ memcpy(parent_inode, ext4fs_root->inode, sizeof(struct ext2_inode));
+ memcpy(first_inode, parent_inode, sizeof(struct ext2_inode));
+ if (flags & F_FILE)
+ result_inode_no = EXT2_ROOT_INO;
+ for (i = 1; i < depth; i++) {
+ matched_inode_no = search_dir(parent_inode, ptr[i]);
+ if (matched_inode_no == -1) {
+ if (ptr[i + 1] == NULL && i == 1) {
+ result_inode_no = EXT2_ROOT_INO;
+ goto end;
+ } else {
+ if (ptr[i + 1] == NULL)
+ break;
+ printf("Invalid path\n");
+ result_inode_no = -1;
+ goto fail;
+ }
+ } else {
+ if (ptr[i + 1] != NULL) {
+ memset(parent_inode, '\0',
+ sizeof(struct ext2_inode));
+ if (ext4fs_iget(matched_inode_no,
+ parent_inode)) {
+ result_inode_no = -1;
+ goto fail;
+ }
+ result_inode_no = matched_inode_no;
+ } else {
+ break;
+ }
+ }
+ }
+
+end:
+ if (i == 1)
+ matched_inode_no = search_dir(first_inode, ptr[i]);
+ else
+ matched_inode_no = search_dir(parent_inode, ptr[i]);
+
+ if (matched_inode_no != -1) {
+ ext4fs_iget(matched_inode_no, &temp_inode);
+ if (temp_inode.mode & S_IFDIR) {
+ printf("It is a Directory\n");
+ result_inode_no = -1;
+ goto fail;
+ }
+ }
+
+ if (strlen(ptr[i]) > 256) {
+ result_inode_no = -1;
+ goto fail;
+ }
+ memcpy(dname, ptr[i], strlen(ptr[i]));
+
+fail:
+ free(depth_dirname);
+ free(parse_dirname);
+ free(ptr);
+ free(parent_inode);
+ free(first_inode);
+
+ return result_inode_no;
+}
+
+static int check_filename(char *filename, unsigned int blknr)
+{
+ unsigned int first_block_no_of_root;
+ int totalbytes = 0;
+ int templength = 0;
+ int status, inodeno;
+ int found = 0;
+ char *root_first_block_buffer = NULL;
+ char *root_first_block_addr = NULL;
+ struct ext2_dirent *dir = NULL;
+ struct ext2_dirent *previous_dir = NULL;
+ char *ptr = NULL;
+ struct ext_filesystem *fs = get_fs();
+
+ /* get the first block of root */
+ first_block_no_of_root = blknr;
+ root_first_block_buffer = zalloc(fs->blksz);
+ if (!root_first_block_buffer)
+ return -ENOMEM;
+ root_first_block_addr = root_first_block_buffer;
+ status = ext4fs_devread((lbaint_t)first_block_no_of_root *
+ fs->sect_perblk, 0,
+ fs->blksz, root_first_block_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(root_first_block_buffer, first_block_no_of_root))
+ goto fail;
+ dir = (struct ext2_dirent *)root_first_block_buffer;
+ ptr = (char *)dir;
+ totalbytes = 0;
+ while (dir->direntlen >= 0) {
+ /*
+ * blocksize - totalbytes equals dir->direntlen
+ * only for the last directory entry, whose
+ * direntlen also covers the free space
+ * remaining in the block
+ */
+ if (strlen(filename) == dir->namelen) {
+ if (strncmp(filename, ptr + sizeof(struct ext2_dirent),
+ dir->namelen) == 0) {
+ printf("file found deleting\n");
+ previous_dir->direntlen += dir->direntlen;
+ inodeno = dir->inode;
+ dir->inode = 0;
+ found = 1;
+ break;
+ }
+ }
+
+ if (fs->blksz - totalbytes == dir->direntlen)
+ break;
+
+ /* traverse each directory entry */
+ templength = dir->direntlen;
+ totalbytes = totalbytes + templength;
+ previous_dir = dir;
+ dir = (struct ext2_dirent *)((char *)dir + templength);
+ ptr = (char *)dir;
+ }
+
+
+ if (found == 1) {
+ if (ext4fs_put_metadata(root_first_block_addr,
+ first_block_no_of_root))
+ goto fail;
+ return inodeno;
+ }
+fail:
+ free(root_first_block_buffer);
+
+ return -1;
+}
+
+int ext4fs_filename_check(char *filename)
+{
+ short direct_blk_idx = 0;
+ long int blknr = -1;
+ int inodeno = -1;
+
+ /* read the block no allocated to a file */
+ for (direct_blk_idx = 0; direct_blk_idx < INDIRECT_BLOCKS;
+ direct_blk_idx++) {
+ blknr = read_allocated_block(g_parent_inode, direct_blk_idx);
+ if (blknr == 0)
+ break;
+ inodeno = check_filename(filename, blknr);
+ if (inodeno != -1)
+ return inodeno;
+ }
+
+ return -1;
+}
+
+long int ext4fs_get_new_blk_no(void)
+{
+ short i;
+ short status;
+ int remainder;
+ unsigned int bg_idx;
+ static int prev_bg_bitmap_index = -1;
+ unsigned int blk_per_grp = ext4fs_root->sblock.blocks_per_group;
+ struct ext_filesystem *fs = get_fs();
+ char *journal_buffer = zalloc(fs->blksz);
+ char *zero_buffer = zalloc(fs->blksz);
+ if (!journal_buffer || !zero_buffer)
+ goto fail;
+ struct ext2_block_group *bgd = (struct ext2_block_group *)fs->gdtable;
+
+ if (fs->first_pass_bbmap == 0) {
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ if (bgd[i].free_blocks) {
+ if (bgd[i].bg_flags & EXT4_BG_BLOCK_UNINIT) {
+ put_ext4(((uint64_t) ((uint64_t)bgd[i].block_id *
+ (uint64_t)fs->blksz)),
+ zero_buffer, fs->blksz);
+ bgd[i].bg_flags &= ~EXT4_BG_BLOCK_UNINIT;
+ memcpy(fs->blk_bmaps[i], zero_buffer,
+ fs->blksz);
+ }
+ fs->curr_blkno =
+ _get_new_blk_no(fs->blk_bmaps[i]);
+ if (fs->curr_blkno == -1)
+ /* if the block bitmap is completely filled */
+ continue;
+ fs->curr_blkno = fs->curr_blkno +
+ (i * fs->blksz * 8);
+ fs->first_pass_bbmap++;
+ bgd[i].free_blocks--;
+ fs->sb->free_blocks--;
+ status = ext4fs_devread((lbaint_t)
+ bgd[i].block_id *
+ fs->sect_perblk, 0,
+ fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[i].block_id))
+ goto fail;
+ goto success;
+ } else {
+ debug("no space left on block group %d\n", i);
+ }
+ }
+
+ goto fail;
+ } else {
+restart:
+ fs->curr_blkno++;
+ /* get the block bitmap index corresponding to this block number */
+ bg_idx = fs->curr_blkno / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = fs->curr_blkno % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+
+ /*
+ * Skip completely filled block group bitmaps to
+ * optimize the block allocation.
+ */
+ if (bg_idx >= fs->no_blkgrp)
+ goto fail;
+
+ if (bgd[bg_idx].free_blocks == 0) {
+ debug("block group %u is full. Skipping\n", bg_idx);
+ fs->curr_blkno = fs->curr_blkno + blk_per_grp;
+ fs->curr_blkno--;
+ goto restart;
+ }
+
+ if (bgd[bg_idx].bg_flags & EXT4_BG_BLOCK_UNINIT) {
+ memset(zero_buffer, '\0', fs->blksz);
+ put_ext4(((uint64_t) ((uint64_t)bgd[bg_idx].block_id *
+ (uint64_t)fs->blksz)), zero_buffer, fs->blksz);
+ memcpy(fs->blk_bmaps[bg_idx], zero_buffer, fs->blksz);
+ bgd[bg_idx].bg_flags = bgd[bg_idx].bg_flags &
+ ~EXT4_BG_BLOCK_UNINIT;
+ }
+
+ if (ext4fs_set_block_bmap(fs->curr_blkno, fs->blk_bmaps[bg_idx],
+ bg_idx) != 0) {
+ debug("going for restart for the block no %ld %u\n",
+ fs->curr_blkno, bg_idx);
+ goto restart;
+ }
+
+ /* journal backup */
+ if (prev_bg_bitmap_index != bg_idx) {
+ memset(journal_buffer, '\0', fs->blksz);
+ status = ext4fs_devread((lbaint_t)bgd[bg_idx].block_id
+ * fs->sect_perblk,
+ 0, fs->blksz, journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+
+ prev_bg_bitmap_index = bg_idx;
+ }
+ bgd[bg_idx].free_blocks--;
+ fs->sb->free_blocks--;
+ goto success;
+ }
+success:
+ free(journal_buffer);
+ free(zero_buffer);
+
+ return fs->curr_blkno;
+fail:
+ free(journal_buffer);
+ free(zero_buffer);
+
+ return -1;
+}
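+/*
+ * Worked note on the bg_idx fixup above (illustrative): with a 1 KiB
+ * block size the first data block is block 1, so block group g covers
+ * blocks 1 + g * blocks_per_group .. (g + 1) * blocks_per_group.  A
+ * block number that is an exact multiple of blocks_per_group therefore
+ * still belongs to group g - 1, which is why bg_idx is decremented when
+ * curr_blkno % blk_per_grp == 0.
+ */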
+
+int ext4fs_get_new_inode_no(void)
+{
+ short i;
+ short status;
+ unsigned int ibmap_idx;
+ static int prev_inode_bitmap_index = -1;
+ unsigned int inodes_per_grp = ext4fs_root->sblock.inodes_per_group;
+ struct ext_filesystem *fs = get_fs();
+ char *journal_buffer = zalloc(fs->blksz);
+ char *zero_buffer = zalloc(fs->blksz);
+ if (!journal_buffer || !zero_buffer)
+ goto fail;
+ struct ext2_block_group *bgd = (struct ext2_block_group *)fs->gdtable;
+
+ if (fs->first_pass_ibmap == 0) {
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ if (bgd[i].free_inodes) {
+ if (bgd[i].bg_itable_unused !=
+ bgd[i].free_inodes)
+ bgd[i].bg_itable_unused =
+ bgd[i].free_inodes;
+ if (bgd[i].bg_flags & EXT4_BG_INODE_UNINIT) {
+ put_ext4(((uint64_t)
+ ((uint64_t)bgd[i].inode_id *
+ (uint64_t)fs->blksz)),
+ zero_buffer, fs->blksz);
+ bgd[i].bg_flags = bgd[i].bg_flags &
+ ~EXT4_BG_INODE_UNINIT;
+ memcpy(fs->inode_bmaps[i],
+ zero_buffer, fs->blksz);
+ }
+ fs->curr_inode_no =
+ _get_new_inode_no(fs->inode_bmaps[i]);
+ if (fs->curr_inode_no == -1)
+ /* if the inode bitmap is completely filled */
+ continue;
+ fs->curr_inode_no = fs->curr_inode_no +
+ (i * inodes_per_grp);
+ fs->first_pass_ibmap++;
+ bgd[i].free_inodes--;
+ bgd[i].bg_itable_unused--;
+ fs->sb->free_inodes--;
+ status = ext4fs_devread((lbaint_t)
+ bgd[i].inode_id *
+ fs->sect_perblk, 0,
+ fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[i].inode_id))
+ goto fail;
+ goto success;
+ } else
+ debug("no inode left on block group %d\n", i);
+ }
+ goto fail;
+ } else {
+restart:
+ fs->curr_inode_no++;
+ /* get the inode bitmap index corresponding to this inode number */
+ ibmap_idx = fs->curr_inode_no / inodes_per_grp;
+ if (bgd[ibmap_idx].bg_flags & EXT4_BG_INODE_UNINIT) {
+ memset(zero_buffer, '\0', fs->blksz);
+ put_ext4(((uint64_t) ((uint64_t)bgd[ibmap_idx].inode_id *
+ (uint64_t)fs->blksz)), zero_buffer,
+ fs->blksz);
+ bgd[ibmap_idx].bg_flags =
+ bgd[ibmap_idx].bg_flags & ~EXT4_BG_INODE_UNINIT;
+ memcpy(fs->inode_bmaps[ibmap_idx], zero_buffer,
+ fs->blksz);
+ }
+
+ if (ext4fs_set_inode_bmap(fs->curr_inode_no,
+ fs->inode_bmaps[ibmap_idx],
+ ibmap_idx) != 0) {
+ debug("going for restart for the block no %d %u\n",
+ fs->curr_inode_no, ibmap_idx);
+ goto restart;
+ }
+
+ /* journal backup */
+ if (prev_inode_bitmap_index != ibmap_idx) {
+ memset(journal_buffer, '\0', fs->blksz);
+ status = ext4fs_devread((lbaint_t)
+ bgd[ibmap_idx].inode_id
+ * fs->sect_perblk,
+ 0, fs->blksz, journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[ibmap_idx].inode_id))
+ goto fail;
+ prev_inode_bitmap_index = ibmap_idx;
+ }
+ if (bgd[ibmap_idx].bg_itable_unused !=
+ bgd[ibmap_idx].free_inodes)
+ bgd[ibmap_idx].bg_itable_unused =
+ bgd[ibmap_idx].free_inodes;
+ bgd[ibmap_idx].free_inodes--;
+ bgd[ibmap_idx].bg_itable_unused--;
+ fs->sb->free_inodes--;
+ goto success;
+ }
+
+success:
+ free(journal_buffer);
+ free(zero_buffer);
+
+ return fs->curr_inode_no;
+fail:
+ free(journal_buffer);
+ free(zero_buffer);
+
+ return -1;
+
+}
+
+
+static void alloc_single_indirect_block(struct ext2_inode *file_inode,
+ unsigned int *total_remaining_blocks,
+ unsigned int *no_blks_reqd)
+{
+ short i;
+ short status;
+ long int actual_block_no;
+ long int si_blockno;
+ /* si :single indirect */
+ unsigned int *si_buffer = NULL;
+ unsigned int *si_start_addr = NULL;
+ struct ext_filesystem *fs = get_fs();
+
+ if (*total_remaining_blocks != 0) {
+ si_buffer = zalloc(fs->blksz);
+ if (!si_buffer) {
+ printf("No Memory\n");
+ return;
+ }
+ si_start_addr = si_buffer;
+ si_blockno = ext4fs_get_new_blk_no();
+ if (si_blockno == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ (*no_blks_reqd)++;
+ debug("SIPB %ld: %u\n", si_blockno, *total_remaining_blocks);
+
+ status = ext4fs_devread((lbaint_t)si_blockno * fs->sect_perblk,
+ 0, fs->blksz, (char *)si_buffer);
+ memset(si_buffer, '\0', fs->blksz);
+ if (status == 0)
+ goto fail;
+
+ for (i = 0; i < (fs->blksz / sizeof(int)); i++) {
+ actual_block_no = ext4fs_get_new_blk_no();
+ if (actual_block_no == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ *si_buffer = actual_block_no;
+ debug("SIAB %u: %u\n", *si_buffer,
+ *total_remaining_blocks);
+
+ si_buffer++;
+ (*total_remaining_blocks)--;
+ if (*total_remaining_blocks == 0)
+ break;
+ }
+
+ /* write the block to disk */
+ put_ext4(((uint64_t) ((uint64_t)si_blockno * (uint64_t)fs->blksz)),
+ si_start_addr, fs->blksz);
+ file_inode->b.blocks.indir_block = si_blockno;
+ }
+fail:
+ free(si_start_addr);
+}
+
+static void alloc_double_indirect_block(struct ext2_inode *file_inode,
+ unsigned int *total_remaining_blocks,
+ unsigned int *no_blks_reqd)
+{
+ short i;
+ short j;
+ short status;
+ long int actual_block_no;
+ /* di:double indirect */
+ long int di_blockno_parent;
+ long int di_blockno_child;
+ unsigned int *di_parent_buffer = NULL;
+ unsigned int *di_child_buff = NULL;
+ unsigned int *di_block_start_addr = NULL;
+ unsigned int *di_child_buff_start = NULL;
+ struct ext_filesystem *fs = get_fs();
+
+ if (*total_remaining_blocks != 0) {
+ /* double indirect parent block connecting to inode */
+ di_blockno_parent = ext4fs_get_new_blk_no();
+ if (di_blockno_parent == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ di_parent_buffer = zalloc(fs->blksz);
+ if (!di_parent_buffer)
+ goto fail;
+
+ di_block_start_addr = di_parent_buffer;
+ (*no_blks_reqd)++;
+ debug("DIPB %ld: %u\n", di_blockno_parent,
+ *total_remaining_blocks);
+
+ status = ext4fs_devread((lbaint_t)di_blockno_parent *
+ fs->sect_perblk, 0,
+ fs->blksz, (char *)di_parent_buffer);
+
+ if (!status) {
+ printf("%s: Device read error!\n", __func__);
+ goto fail;
+ }
+ memset(di_parent_buffer, '\0', fs->blksz);
+
+ /*
+ * for each entry in the double indirect parent
+ * block allocate one child block
+ */
+ for (i = 0; i < (fs->blksz / sizeof(int)); i++) {
+ di_blockno_child = ext4fs_get_new_blk_no();
+ if (di_blockno_child == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ di_child_buff = zalloc(fs->blksz);
+ if (!di_child_buff)
+ goto fail;
+
+ di_child_buff_start = di_child_buff;
+ *di_parent_buffer = di_blockno_child;
+ di_parent_buffer++;
+ (*no_blks_reqd)++;
+ debug("DICB %ld: %u\n", di_blockno_child,
+ *total_remaining_blocks);
+
+ status = ext4fs_devread((lbaint_t)di_blockno_child *
+ fs->sect_perblk, 0,
+ fs->blksz,
+ (char *)di_child_buff);
+
+ if (!status) {
+ printf("%s: Device read error!\n", __func__);
+ goto fail;
+ }
+ memset(di_child_buff, '\0', fs->blksz);
+ /* filling of actual datablocks for each child */
+ for (j = 0; j < (fs->blksz / sizeof(int)); j++) {
+ actual_block_no = ext4fs_get_new_blk_no();
+ if (actual_block_no == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ *di_child_buff = actual_block_no;
+ debug("DIAB %ld: %u\n", actual_block_no,
+ *total_remaining_blocks);
+
+ di_child_buff++;
+ (*total_remaining_blocks)--;
+ if (*total_remaining_blocks == 0)
+ break;
+ }
+ /* write the block table */
+ put_ext4(((uint64_t) ((uint64_t)di_blockno_child * (uint64_t)fs->blksz)),
+ di_child_buff_start, fs->blksz);
+ free(di_child_buff_start);
+ di_child_buff_start = NULL;
+
+ if (*total_remaining_blocks == 0)
+ break;
+ }
+ put_ext4(((uint64_t) ((uint64_t)di_blockno_parent * (uint64_t)fs->blksz)),
+ di_block_start_addr, fs->blksz);
+ file_inode->b.blocks.double_indir_block = di_blockno_parent;
+ }
+fail:
+ free(di_block_start_addr);
+}
+
+static void alloc_triple_indirect_block(struct ext2_inode *file_inode,
+ unsigned int *total_remaining_blocks,
+ unsigned int *no_blks_reqd)
+{
+ short i;
+ short j;
+ short k;
+ long int actual_block_no;
+ /* ti: Triple Indirect */
+ long int ti_gp_blockno;
+ long int ti_parent_blockno;
+ long int ti_child_blockno;
+ unsigned int *ti_gp_buff = NULL;
+ unsigned int *ti_parent_buff = NULL;
+ unsigned int *ti_child_buff = NULL;
+ unsigned int *ti_gp_buff_start_addr = NULL;
+ unsigned int *ti_pbuff_start_addr = NULL;
+ unsigned int *ti_cbuff_start_addr = NULL;
+ struct ext_filesystem *fs = get_fs();
+ if (*total_remaining_blocks != 0) {
+ /* triple indirect grand parent block connecting to inode */
+ ti_gp_blockno = ext4fs_get_new_blk_no();
+ if (ti_gp_blockno == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ ti_gp_buff = zalloc(fs->blksz);
+ if (!ti_gp_buff)
+ goto fail;
+
+ ti_gp_buff_start_addr = ti_gp_buff;
+ (*no_blks_reqd)++;
+ debug("TIGPB %ld: %u\n", ti_gp_blockno,
+ *total_remaining_blocks);
+
+ /* for each 4-byte grandparent entry allocate one parent block */
+ for (i = 0; i < (fs->blksz / sizeof(int)); i++) {
+ ti_parent_blockno = ext4fs_get_new_blk_no();
+ if (ti_parent_blockno == -1) {
+ printf("no block left to assign\n");
+ goto fail;
+ }
+ ti_parent_buff = zalloc(fs->blksz);
+ if (!ti_parent_buff)
+ goto fail;
+
+ ti_pbuff_start_addr = ti_parent_buff;
+ *ti_gp_buff = ti_parent_blockno;
+ ti_gp_buff++;
+ (*no_blks_reqd)++;
+ debug("TIPB %ld: %u\n", ti_parent_blockno,
+ *total_remaining_blocks);
+
+ /* for each 4-byte parent entry allocate one child block */
+ for (j = 0; j < (fs->blksz / sizeof(int)); j++) {
+ ti_child_blockno = ext4fs_get_new_blk_no();
+ if (ti_child_blockno == -1) {
+ printf("no block left assign\n");
+ goto fail;
+ }
+ ti_child_buff = zalloc(fs->blksz);
+ if (!ti_child_buff)
+ goto fail;
+
+ ti_cbuff_start_addr = ti_child_buff;
+ *ti_parent_buff = ti_child_blockno;
+ ti_parent_buff++;
+ (*no_blks_reqd)++;
+ debug("TICB %ld: %u\n", ti_parent_blockno,
+ *total_remaining_blocks);
+
+ /* fill actual datablocks for each child */
+ for (k = 0; k < (fs->blksz / sizeof(int));
+ k++) {
+ actual_block_no =
+ ext4fs_get_new_blk_no();
+ if (actual_block_no == -1) {
+ printf("no block left\n");
+ goto fail;
+ }
+ *ti_child_buff = actual_block_no;
+ debug("TIAB %ld: %u\n", actual_block_no,
+ *total_remaining_blocks);
+
+ ti_child_buff++;
+ (*total_remaining_blocks)--;
+ if (*total_remaining_blocks == 0)
+ break;
+ }
+ /* write the child block */
+ put_ext4(((uint64_t) ((uint64_t)ti_child_blockno *
+ (uint64_t)fs->blksz)),
+ ti_cbuff_start_addr, fs->blksz);
+ free(ti_cbuff_start_addr);
+
+ if (*total_remaining_blocks == 0)
+ break;
+ }
+ /* write the parent block */
+ put_ext4(((uint64_t) ((uint64_t)ti_parent_blockno * (uint64_t)fs->blksz)),
+ ti_pbuff_start_addr, fs->blksz);
+ free(ti_pbuff_start_addr);
+
+ if (*total_remaining_blocks == 0)
+ break;
+ }
+ /* write the grand parent block */
+ put_ext4(((uint64_t) ((uint64_t)ti_gp_blockno * (uint64_t)fs->blksz)),
+ ti_gp_buff_start_addr, fs->blksz);
+ file_inode->b.blocks.triple_indir_block = ti_gp_blockno;
+ }
+fail:
+ free(ti_gp_buff_start_addr);
+}
+
+void ext4fs_allocate_blocks(struct ext2_inode *file_inode,
+ unsigned int total_remaining_blocks,
+ unsigned int *total_no_of_block)
+{
+ short i;
+ long int direct_blockno;
+ unsigned int no_blks_reqd = 0;
+
+ /* allocation of direct blocks */
+ for (i = 0; i < INDIRECT_BLOCKS; i++) {
+ direct_blockno = ext4fs_get_new_blk_no();
+ if (direct_blockno == -1) {
+ printf("no block left to assign\n");
+ return;
+ }
+ file_inode->b.blocks.dir_blocks[i] = direct_blockno;
+ debug("DB %ld: %u\n", direct_blockno, total_remaining_blocks);
+
+ total_remaining_blocks--;
+ if (total_remaining_blocks == 0)
+ break;
+ }
+
+ alloc_single_indirect_block(file_inode, &total_remaining_blocks,
+ &no_blks_reqd);
+ alloc_double_indirect_block(file_inode, &total_remaining_blocks,
+ &no_blks_reqd);
+ alloc_triple_indirect_block(file_inode, &total_remaining_blocks,
+ &no_blks_reqd);
+ *total_no_of_block += no_blks_reqd;
+}
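+/*
+ * Rough sizing example for the allocator above (illustrative, assuming
+ * the usual 12 direct slots and a 4 KiB block size, i.e. 1024 entries
+ * per index block): placing 2000 data blocks consumes 12 direct
+ * entries, one single indirect block covering the next 1024, and a
+ * double indirect parent plus one child block for the remaining 964,
+ * so no_blks_reqd adds 3 metadata blocks to *total_no_of_block.
+ */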
+
+#endif
+
+static struct ext4_extent_header *ext4fs_get_extent_block
+ (struct ext2_data *data, char *buf,
+ struct ext4_extent_header *ext_block,
+ uint32_t fileblock, int log2_blksz)
+{
+ struct ext4_extent_idx *index;
+ unsigned long long block;
+ int blksz = EXT2_BLOCK_SIZE(data);
+ int i;
+
+ while (1) {
+ index = (struct ext4_extent_idx *)(ext_block + 1);
+
+ if (le16_to_cpu(ext_block->eh_magic) != EXT4_EXT_MAGIC)
+ return 0;
+
+ if (ext_block->eh_depth == 0)
+ return ext_block;
+ i = -1;
+ do {
+ i++;
+ if (i >= le16_to_cpu(ext_block->eh_entries))
+ break;
+ } while (fileblock >= le32_to_cpu(index[i].ei_block));
+
+ if (--i < 0)
+ return 0;
+
+ block = le16_to_cpu(index[i].ei_leaf_hi);
+ block = (block << 32) + le32_to_cpu(index[i].ei_leaf_lo);
+
+ if (ext4fs_devread((lbaint_t)block << log2_blksz, 0, blksz,
+ buf))
+ ext_block = (struct ext4_extent_header *)buf;
+ else
+ return 0;
+ }
+}
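+/*
+ * Sketch of the extent lookup above (illustrative): while eh_depth > 0
+ * the block holds ext4_extent_idx entries and the loop takes the last
+ * index whose ei_block is <= fileblock, then reads the child block at
+ * ei_leaf_hi:ei_leaf_lo.  Once a leaf (eh_depth == 0) is reached, the
+ * caller scans its ext4_extent entries the same way to map the logical
+ * block onto a physical range.
+ */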
+
+static int ext4fs_blockgroup
+ (struct ext2_data *data, int group, struct ext2_block_group *blkgrp)
+{
+ long int blkno;
+ unsigned int blkoff, desc_per_blk;
+ int log2blksz = get_fs()->dev_desc->log2blksz;
+
+ desc_per_blk = EXT2_BLOCK_SIZE(data) / sizeof(struct ext2_block_group);
+
+ blkno = __le32_to_cpu(data->sblock.first_data_block) + 1 +
+ group / desc_per_blk;
+ blkoff = (group % desc_per_blk) * sizeof(struct ext2_block_group);
+
+ debug("ext4fs read %d group descriptor (blkno %ld blkoff %u)\n",
+ group, blkno, blkoff);
+
+ return ext4fs_devread((lbaint_t)blkno <<
+ (LOG2_BLOCK_SIZE(data) - log2blksz),
+ blkoff, sizeof(struct ext2_block_group),
+ (char *)blkgrp);
+}
+
+int ext4fs_read_inode(struct ext2_data *data, int ino, struct ext2_inode *inode)
+{
+ struct ext2_block_group blkgrp;
+ struct ext2_sblock *sblock = &data->sblock;
+ struct ext_filesystem *fs = get_fs();
+ int log2blksz = get_fs()->dev_desc->log2blksz;
+ int inodes_per_block, status;
+ long int blkno;
+ unsigned int blkoff;
+
+ /* It is easier to calculate if the first inode is 0. */
+ ino--;
+ status = ext4fs_blockgroup(data, ino / __le32_to_cpu
+ (sblock->inodes_per_group), &blkgrp);
+ if (status == 0)
+ return 0;
+
+ inodes_per_block = EXT2_BLOCK_SIZE(data) / fs->inodesz;
+ blkno = __le32_to_cpu(blkgrp.inode_table_id) +
+ (ino % __le32_to_cpu(sblock->inodes_per_group)) / inodes_per_block;
+ blkoff = (ino % inodes_per_block) * fs->inodesz;
+ /* Read the inode. */
+ status = ext4fs_devread((lbaint_t)blkno << (LOG2_BLOCK_SIZE(data) -
+ log2blksz), blkoff,
+ sizeof(struct ext2_inode), (char *)inode);
+ if (status == 0)
+ return 0;
+
+ return 1;
+}
+
+long int read_allocated_block(struct ext2_inode *inode, int fileblock)
+{
+ long int blknr;
+ int blksz;
+ int log2_blksz;
+ int status;
+ long int rblock;
+ long int perblock_parent;
+ long int perblock_child;
+ unsigned long long start;
+ /* get the blocksize of the filesystem */
+ blksz = EXT2_BLOCK_SIZE(ext4fs_root);
+ log2_blksz = LOG2_BLOCK_SIZE(ext4fs_root)
+ - get_fs()->dev_desc->log2blksz;
+
+ if (le32_to_cpu(inode->flags) & EXT4_EXTENTS_FL) {
+ char *buf = zalloc(blksz);
+ if (!buf)
+ return -ENOMEM;
+ struct ext4_extent_header *ext_block;
+ struct ext4_extent *extent;
+ int i = -1;
+ ext_block =
+ ext4fs_get_extent_block(ext4fs_root, buf,
+ (struct ext4_extent_header *)
+ inode->b.blocks.dir_blocks,
+ fileblock, log2_blksz);
+ if (!ext_block) {
+ printf("invalid extent block\n");
+ free(buf);
+ return -EINVAL;
+ }
+
+ extent = (struct ext4_extent *)(ext_block + 1);
+
+ do {
+ i++;
+ if (i >= le16_to_cpu(ext_block->eh_entries))
+ break;
+ } while (fileblock >= le32_to_cpu(extent[i].ee_block));
+ if (--i >= 0) {
+ fileblock -= le32_to_cpu(extent[i].ee_block);
+ if (fileblock >= le16_to_cpu(extent[i].ee_len)) {
+ free(buf);
+ return 0;
+ }
+
+ start = le16_to_cpu(extent[i].ee_start_hi);
+ start = (start << 32) +
+ le32_to_cpu(extent[i].ee_start_lo);
+ free(buf);
+ return fileblock + start;
+ }
+
+ printf("Extent Error\n");
+ free(buf);
+ return -1;
+ }
+
+ /* Direct blocks. */
+ if (fileblock < INDIRECT_BLOCKS)
+ blknr = __le32_to_cpu(inode->b.blocks.dir_blocks[fileblock]);
+
+ /* Single indirect. */
+ else if (fileblock < (INDIRECT_BLOCKS + (blksz / 4))) {
+ if (ext4fs_indir1_block == NULL) {
+ ext4fs_indir1_block = zalloc(blksz);
+ if (ext4fs_indir1_block == NULL) {
+ printf("** SI ext2fs read block (indir 1)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_size = blksz;
+ ext4fs_indir1_blkno = -1;
+ }
+ if (blksz != ext4fs_indir1_size) {
+ free(ext4fs_indir1_block);
+ ext4fs_indir1_block = NULL;
+ ext4fs_indir1_size = 0;
+ ext4fs_indir1_blkno = -1;
+ ext4fs_indir1_block = zalloc(blksz);
+ if (ext4fs_indir1_block == NULL) {
+ printf("** SI ext2fs read block (indir 1):"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_size = blksz;
+ }
+ if ((__le32_to_cpu(inode->b.blocks.indir_block) <<
+ log2_blksz) != ext4fs_indir1_blkno) {
+ status =
+ ext4fs_devread((lbaint_t)__le32_to_cpu
+ (inode->b.blocks.
+ indir_block) << log2_blksz, 0,
+ blksz, (char *)ext4fs_indir1_block);
+ if (status == 0) {
+ printf("** SI ext2fs read block (indir 1)"
+ "failed. **\n");
+ return 0;
+ }
+ ext4fs_indir1_blkno =
+ __le32_to_cpu(inode->b.blocks.
+ indir_block) << log2_blksz;
+ }
+ blknr = __le32_to_cpu(ext4fs_indir1_block
+ [fileblock - INDIRECT_BLOCKS]);
+ }
+ /* Double indirect. */
+ else if (fileblock < (INDIRECT_BLOCKS + (blksz / 4 *
+ (blksz / 4 + 1)))) {
+
+ long int perblock = blksz / 4;
+ long int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4);
+
+ if (ext4fs_indir1_block == NULL) {
+ ext4fs_indir1_block = zalloc(blksz);
+ if (ext4fs_indir1_block == NULL) {
+ printf("** DI ext2fs read block (indir 2 1)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_size = blksz;
+ ext4fs_indir1_blkno = -1;
+ }
+ if (blksz != ext4fs_indir1_size) {
+ free(ext4fs_indir1_block);
+ ext4fs_indir1_block = NULL;
+ ext4fs_indir1_size = 0;
+ ext4fs_indir1_blkno = -1;
+ ext4fs_indir1_block = zalloc(blksz);
+ if (ext4fs_indir1_block == NULL) {
+ printf("** DI ext2fs read block (indir 2 1)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_size = blksz;
+ }
+ if ((__le32_to_cpu(inode->b.blocks.double_indir_block) <<
+ log2_blksz) != ext4fs_indir1_blkno) {
+ status =
+ ext4fs_devread((lbaint_t)__le32_to_cpu
+ (inode->b.blocks.
+ double_indir_block) << log2_blksz,
+ 0, blksz,
+ (char *)ext4fs_indir1_block);
+ if (status == 0) {
+ printf("** DI ext2fs read block (indir 2 1)"
+ "failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_blkno =
+ __le32_to_cpu(inode->b.blocks.double_indir_block) <<
+ log2_blksz;
+ }
+
+ if (ext4fs_indir2_block == NULL) {
+ ext4fs_indir2_block = zalloc(blksz);
+ if (ext4fs_indir2_block == NULL) {
+ printf("** DI ext2fs read block (indir 2 2)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir2_size = blksz;
+ ext4fs_indir2_blkno = -1;
+ }
+ if (blksz != ext4fs_indir2_size) {
+ free(ext4fs_indir2_block);
+ ext4fs_indir2_block = NULL;
+ ext4fs_indir2_size = 0;
+ ext4fs_indir2_blkno = -1;
+ ext4fs_indir2_block = zalloc(blksz);
+ if (ext4fs_indir2_block == NULL) {
+ printf("** DI ext2fs read block (indir 2 2)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir2_size = blksz;
+ }
+ if ((__le32_to_cpu(ext4fs_indir1_block[rblock / perblock]) <<
+ log2_blksz) != ext4fs_indir2_blkno) {
+ status = ext4fs_devread((lbaint_t)__le32_to_cpu
+ (ext4fs_indir1_block
+ [rblock /
+ perblock]) << log2_blksz, 0,
+ blksz,
+ (char *)ext4fs_indir2_block);
+ if (status == 0) {
+ printf("** DI ext2fs read block (indir 2 2)"
+ "failed. **\n");
+ return -1;
+ }
+ ext4fs_indir2_blkno =
+ __le32_to_cpu(ext4fs_indir1_block[rblock
+ /
+ perblock]) <<
+ log2_blksz;
+ }
+ blknr = __le32_to_cpu(ext4fs_indir2_block[rblock % perblock]);
+ }
+ /* Triple indirect. */
+ else {
+ rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4 +
+ (blksz / 4 * blksz / 4));
+ perblock_child = blksz / 4;
+ perblock_parent = ((blksz / 4) * (blksz / 4));
+
+ if (ext4fs_indir1_block == NULL) {
+ ext4fs_indir1_block = zalloc(blksz);
+ if (ext4fs_indir1_block == NULL) {
+ printf("** TI ext2fs read block (indir 2 1)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_size = blksz;
+ ext4fs_indir1_blkno = -1;
+ }
+ if (blksz != ext4fs_indir1_size) {
+ free(ext4fs_indir1_block);
+ ext4fs_indir1_block = NULL;
+ ext4fs_indir1_size = 0;
+ ext4fs_indir1_blkno = -1;
+ ext4fs_indir1_block = zalloc(blksz);
+ if (ext4fs_indir1_block == NULL) {
+ printf("** TI ext2fs read block (indir 2 1)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_size = blksz;
+ }
+ if ((__le32_to_cpu(inode->b.blocks.triple_indir_block) <<
+ log2_blksz) != ext4fs_indir1_blkno) {
+ status = ext4fs_devread
+ ((lbaint_t)
+ __le32_to_cpu(inode->b.blocks.triple_indir_block)
+ << log2_blksz, 0, blksz,
+ (char *)ext4fs_indir1_block);
+ if (status == 0) {
+ printf("** TI ext2fs read block (indir 2 1)"
+ "failed. **\n");
+ return -1;
+ }
+ ext4fs_indir1_blkno =
+ __le32_to_cpu(inode->b.blocks.triple_indir_block) <<
+ log2_blksz;
+ }
+
+ if (ext4fs_indir2_block == NULL) {
+ ext4fs_indir2_block = zalloc(blksz);
+ if (ext4fs_indir2_block == NULL) {
+ printf("** TI ext2fs read block (indir 2 2)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir2_size = blksz;
+ ext4fs_indir2_blkno = -1;
+ }
+ if (blksz != ext4fs_indir2_size) {
+ free(ext4fs_indir2_block);
+ ext4fs_indir2_block = NULL;
+ ext4fs_indir2_size = 0;
+ ext4fs_indir2_blkno = -1;
+ ext4fs_indir2_block = zalloc(blksz);
+ if (ext4fs_indir2_block == NULL) {
+ printf("** TI ext2fs read block (indir 2 2)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir2_size = blksz;
+ }
+ if ((__le32_to_cpu(ext4fs_indir1_block[rblock /
+ perblock_parent]) <<
+ log2_blksz)
+ != ext4fs_indir2_blkno) {
+ status = ext4fs_devread((lbaint_t)__le32_to_cpu
+ (ext4fs_indir1_block
+ [rblock /
+ perblock_parent]) <<
+ log2_blksz, 0, blksz,
+ (char *)ext4fs_indir2_block);
+ if (status == 0) {
+ printf("** TI ext2fs read block (indir 2 2)"
+ "failed. **\n");
+ return -1;
+ }
+ ext4fs_indir2_blkno =
+ __le32_to_cpu(ext4fs_indir1_block[rblock /
+ perblock_parent])
+ << log2_blksz;
+ }
+
+ if (ext4fs_indir3_block == NULL) {
+ ext4fs_indir3_block = zalloc(blksz);
+ if (ext4fs_indir3_block == NULL) {
+ printf("** TI ext2fs read block (indir 2 2)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir3_size = blksz;
+ ext4fs_indir3_blkno = -1;
+ }
+ if (blksz != ext4fs_indir3_size) {
+ free(ext4fs_indir3_block);
+ ext4fs_indir3_block = NULL;
+ ext4fs_indir3_size = 0;
+ ext4fs_indir3_blkno = -1;
+ ext4fs_indir3_block = zalloc(blksz);
+ if (ext4fs_indir3_block == NULL) {
+ printf("** TI ext2fs read block (indir 2 2)"
+ "malloc failed. **\n");
+ return -1;
+ }
+ ext4fs_indir3_size = blksz;
+ }
+ if ((__le32_to_cpu(ext4fs_indir2_block[rblock
+ /
+ perblock_child]) <<
+ log2_blksz) != ext4fs_indir3_blkno) {
+ status =
+ ext4fs_devread((lbaint_t)__le32_to_cpu
+ (ext4fs_indir2_block
+ [(rblock / perblock_child)
+ % (blksz / 4)]) << log2_blksz, 0,
+ blksz, (char *)ext4fs_indir3_block);
+ if (status == 0) {
+ printf("** TI ext2fs read block (indir 2 2)"
+ "failed. **\n");
+ return -1;
+ }
+ ext4fs_indir3_blkno =
+ __le32_to_cpu(ext4fs_indir2_block[(rblock /
+ perblock_child) %
+ (blksz /
+ 4)]) <<
+ log2_blksz;
+ }
+
+ blknr = __le32_to_cpu(ext4fs_indir3_block
+ [rblock % perblock_child]);
+ }
+ debug("read_allocated_block %ld\n", blknr);
+
+ return blknr;
+}
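+/*
+ * Worked example of the block-map boundaries above (illustrative,
+ * assuming 12 direct slots and a 4 KiB block size, so blksz / 4 = 1024
+ * entries per indirect block): file blocks 0-11 come from the direct
+ * slots, 12-1035 from the single indirect block, 1036-1049611 from the
+ * double indirect tree, and anything beyond that from the triple
+ * indirect tree.
+ */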
+
+/**
+ * ext4fs_reinit_global() - Reinitialize values of ext4 write implementation's
+ * global pointers
+ *
+ * This function ensures that sequentially storing a file with the same name
+ * but a different size on the ext4 filesystem works correctly.
+ *
+ * It resets the global data used for the internal representation of the ext4
+ * metadata. Without this reset, replacing a smaller file with a bigger one
+ * truncated the new file.
+ */
+void ext4fs_reinit_global(void)
+{
+ if (ext4fs_indir1_block != NULL) {
+ free(ext4fs_indir1_block);
+ ext4fs_indir1_block = NULL;
+ ext4fs_indir1_size = 0;
+ ext4fs_indir1_blkno = -1;
+ }
+ if (ext4fs_indir2_block != NULL) {
+ free(ext4fs_indir2_block);
+ ext4fs_indir2_block = NULL;
+ ext4fs_indir2_size = 0;
+ ext4fs_indir2_blkno = -1;
+ }
+ if (ext4fs_indir3_block != NULL) {
+ free(ext4fs_indir3_block);
+ ext4fs_indir3_block = NULL;
+ ext4fs_indir3_size = 0;
+ ext4fs_indir3_blkno = -1;
+ }
+}
+void ext4fs_close(void)
+{
+ if ((ext4fs_file != NULL) && (ext4fs_root != NULL)) {
+ ext4fs_free_node(ext4fs_file, &ext4fs_root->diropen);
+ ext4fs_file = NULL;
+ }
+ if (ext4fs_root != NULL) {
+ free(ext4fs_root);
+ ext4fs_root = NULL;
+ }
+
+ ext4fs_reinit_global();
+}
+
+int ext4fs_iterate_dir(struct ext2fs_node *dir, char *name,
+ struct ext2fs_node **fnode, int *ftype)
+{
+ unsigned int fpos = 0;
+ int status;
+ struct ext2fs_node *diro = (struct ext2fs_node *) dir;
+
+#ifdef DEBUG
+ if (name != NULL)
+ printf("Iterate dir %s\n", name);
+#endif /* of DEBUG */
+ if (!diro->inode_read) {
+ status = ext4fs_read_inode(diro->data, diro->ino, &diro->inode);
+ if (status == 0)
+ return 0;
+ }
+ /* Search the file. */
+ while (fpos < __le32_to_cpu(diro->inode.size)) {
+ struct ext2_dirent dirent;
+
+ status = ext4fs_read_file(diro, fpos,
+ sizeof(struct ext2_dirent),
+ (char *) &dirent);
+ if (status < 1)
+ return 0;
+
+ if (dirent.namelen != 0) {
+ char filename[dirent.namelen + 1];
+ struct ext2fs_node *fdiro;
+ int type = FILETYPE_UNKNOWN;
+
+ status = ext4fs_read_file(diro,
+ fpos +
+ sizeof(struct ext2_dirent),
+ dirent.namelen, filename);
+ if (status < 1)
+ return 0;
+
+ fdiro = zalloc(sizeof(struct ext2fs_node));
+ if (!fdiro)
+ return 0;
+
+ fdiro->data = diro->data;
+ fdiro->ino = __le32_to_cpu(dirent.inode);
+
+ filename[dirent.namelen] = '\0';
+
+ if (dirent.filetype != FILETYPE_UNKNOWN) {
+ fdiro->inode_read = 0;
+
+ if (dirent.filetype == FILETYPE_DIRECTORY)
+ type = FILETYPE_DIRECTORY;
+ else if (dirent.filetype == FILETYPE_SYMLINK)
+ type = FILETYPE_SYMLINK;
+ else if (dirent.filetype == FILETYPE_REG)
+ type = FILETYPE_REG;
+ } else {
+ status = ext4fs_read_inode(diro->data,
+ __le32_to_cpu
+ (dirent.inode),
+ &fdiro->inode);
+ if (status == 0) {
+ free(fdiro);
+ return 0;
+ }
+ fdiro->inode_read = 1;
+
+ if ((__le16_to_cpu(fdiro->inode.mode) &
+ FILETYPE_INO_MASK) ==
+ FILETYPE_INO_DIRECTORY) {
+ type = FILETYPE_DIRECTORY;
+ } else if ((__le16_to_cpu(fdiro->inode.mode)
+ & FILETYPE_INO_MASK) ==
+ FILETYPE_INO_SYMLINK) {
+ type = FILETYPE_SYMLINK;
+ } else if ((__le16_to_cpu(fdiro->inode.mode)
+ & FILETYPE_INO_MASK) ==
+ FILETYPE_INO_REG) {
+ type = FILETYPE_REG;
+ }
+ }
+#ifdef DEBUG
+ printf("iterate >%s<\n", filename);
+#endif /* of DEBUG */
+ if ((name != NULL) && (fnode != NULL)
+ && (ftype != NULL)) {
+ if (strcmp(filename, name) == 0) {
+ *ftype = type;
+ *fnode = fdiro;
+ return 1;
+ }
+ } else {
+ if (fdiro->inode_read == 0) {
+ status = ext4fs_read_inode(diro->data,
+ __le32_to_cpu(
+ dirent.inode),
+ &fdiro->inode);
+ if (status == 0) {
+ free(fdiro);
+ return 0;
+ }
+ fdiro->inode_read = 1;
+ }
+ switch (type) {
+ case FILETYPE_DIRECTORY:
+ printf("<DIR> ");
+ break;
+ case FILETYPE_SYMLINK:
+ printf("<SYM> ");
+ break;
+ case FILETYPE_REG:
+ printf(" ");
+ break;
+ default:
+ printf("< ? > ");
+ break;
+ }
+ printf("%10d %s\n",
+ __le32_to_cpu(fdiro->inode.size),
+ filename);
+ }
+ free(fdiro);
+ }
+ fpos += __le16_to_cpu(dirent.direntlen);
+ }
+ return 0;
+}
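+/*
+ * Usage note (a rough sketch): ext4fs_iterate_dir() serves both lookup
+ * and listing.  With a non-NULL name it returns 1 and hands back the
+ * matching node and its type; with name == NULL it walks every entry
+ * and prints an ls-style line (<DIR>/<SYM>/size) instead.
+ */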
+
+static char *ext4fs_read_symlink(struct ext2fs_node *node)
+{
+ char *symlink;
+ struct ext2fs_node *diro = node;
+ int status;
+
+ if (!diro->inode_read) {
+ status = ext4fs_read_inode(diro->data, diro->ino, &diro->inode);
+ if (status == 0)
+ return 0;
+ }
+ symlink = zalloc(__le32_to_cpu(diro->inode.size) + 1);
+ if (!symlink)
+ return 0;
+
+ if (__le32_to_cpu(diro->inode.size) <= 60) {
+ strncpy(symlink, diro->inode.b.symlink,
+ __le32_to_cpu(diro->inode.size));
+ } else {
+ status = ext4fs_read_file(diro, 0,
+ __le32_to_cpu(diro->inode.size),
+ symlink);
+ if (status == 0) {
+ free(symlink);
+ return 0;
+ }
+ }
+ symlink[__le32_to_cpu(diro->inode.size)] = '\0';
+ return symlink;
+}
+
+static int ext4fs_find_file1(const char *currpath,
+ struct ext2fs_node *currroot,
+ struct ext2fs_node **currfound, int *foundtype)
+{
+ char fpath[strlen(currpath) + 1];
+ char *name = fpath;
+ char *next;
+ int status;
+ int type = FILETYPE_DIRECTORY;
+ struct ext2fs_node *currnode = currroot;
+ struct ext2fs_node *oldnode = currroot;
+
+ strncpy(fpath, currpath, strlen(currpath) + 1);
+
+ /* Remove all leading slashes. */
+ while (*name == '/')
+ name++;
+
+ if (!*name) {
+ *currfound = currnode;
+ return 1;
+ }
+
+ for (;;) {
+ int found;
+
+ /* Extract the actual part from the pathname. */
+ next = strchr(name, '/');
+ if (next) {
+ /* Remove all leading slashes. */
+ while (*next == '/')
+ *(next++) = '\0';
+ }
+
+ if (type != FILETYPE_DIRECTORY) {
+ ext4fs_free_node(currnode, currroot);
+ return 0;
+ }
+
+ oldnode = currnode;
+
+ /* Iterate over the directory. */
+ found = ext4fs_iterate_dir(currnode, name, &currnode, &type);
+ if (found == 0)
+ return 0;
+
+ if (found == -1)
+ break;
+
+ /* Read in the symlink and follow it. */
+ if (type == FILETYPE_SYMLINK) {
+ char *symlink;
+
+ /* Test if the symlink does not loop. */
+ if (++symlinknest == 8) {
+ ext4fs_free_node(currnode, currroot);
+ ext4fs_free_node(oldnode, currroot);
+ return 0;
+ }
+
+ symlink = ext4fs_read_symlink(currnode);
+ ext4fs_free_node(currnode, currroot);
+
+ if (!symlink) {
+ ext4fs_free_node(oldnode, currroot);
+ return 0;
+ }
+
+ debug("Got symlink >%s<\n", symlink);
+
+ if (symlink[0] == '/') {
+ ext4fs_free_node(oldnode, currroot);
+ oldnode = &ext4fs_root->diropen;
+ }
+
+ /* Lookup the node the symlink points to. */
+ status = ext4fs_find_file1(symlink, oldnode,
+ &currnode, &type);
+
+ free(symlink);
+
+ if (status == 0) {
+ ext4fs_free_node(oldnode, currroot);
+ return 0;
+ }
+ }
+
+ ext4fs_free_node(oldnode, currroot);
+
+ /* Found the node! */
+ if (!next || *next == '\0') {
+ *currfound = currnode;
+ *foundtype = type;
+ return 1;
+ }
+ name = next;
+ }
+ return -1;
+}
+
+int ext4fs_find_file(const char *path, struct ext2fs_node *rootnode,
+ struct ext2fs_node **foundnode, int expecttype)
+{
+ int status;
+ int foundtype = FILETYPE_DIRECTORY;
+
+ symlinknest = 0;
+ if (!path)
+ return 0;
+
+ status = ext4fs_find_file1(path, rootnode, foundnode, &foundtype);
+ if (status == 0)
+ return 0;
+
+ /* Check if the node that was found was of the expected type. */
+ if ((expecttype == FILETYPE_REG) && (foundtype != expecttype))
+ return 0;
+ else if ((expecttype == FILETYPE_DIRECTORY)
+ && (foundtype != expecttype))
+ return 0;
+
+ return 1;
+}
+
+int ext4fs_open(const char *filename)
+{
+ struct ext2fs_node *fdiro = NULL;
+ int status;
+ int len;
+
+ if (ext4fs_root == NULL)
+ return -1;
+
+ ext4fs_file = NULL;
+ status = ext4fs_find_file(filename, &ext4fs_root->diropen, &fdiro,
+ FILETYPE_REG);
+ if (status == 0)
+ goto fail;
+
+ if (!fdiro->inode_read) {
+ status = ext4fs_read_inode(fdiro->data, fdiro->ino,
+ &fdiro->inode);
+ if (status == 0)
+ goto fail;
+ }
+ len = __le32_to_cpu(fdiro->inode.size);
+ ext4fs_file = fdiro;
+
+ return len;
+fail:
+ ext4fs_free_node(fdiro, &ext4fs_root->diropen);
+
+ return -1;
+}
+
+int ext4fs_mount(unsigned part_length)
+{
+ struct ext2_data *data;
+ int status;
+ struct ext_filesystem *fs = get_fs();
+ data = zalloc(SUPERBLOCK_SIZE);
+ if (!data)
+ return 0;
+
+ /* Read the superblock. */
+ status = ext4_read_superblock((char *)&data->sblock);
+
+ if (status == 0)
+ goto fail;
+
+ /* Make sure this is an ext2 filesystem. */
+ if (__le16_to_cpu(data->sblock.magic) != EXT2_MAGIC)
+ goto fail;
+
+ if (__le32_to_cpu(data->sblock.revision_level) == 0)
+ fs->inodesz = 128;
+ else
+ fs->inodesz = __le16_to_cpu(data->sblock.inode_size);
+
+ debug("EXT2 rev %d, inode_size %d\n",
+ __le32_to_cpu(data->sblock.revision_level), fs->inodesz);
+
+ data->diropen.data = data;
+ data->diropen.ino = 2;
+ data->diropen.inode_read = 1;
+ data->inode = &data->diropen.inode;
+
+ status = ext4fs_read_inode(data, 2, data->inode);
+ if (status == 0)
+ goto fail;
+
+ ext4fs_root = data;
+
+ return 1;
+fail:
+ printf("Failed to mount ext2 filesystem...\n");
+ free(data);
+ ext4fs_root = NULL;
+
+ return 0;
+}
diff --git a/qemu/roms/u-boot/fs/ext4/ext4_common.h b/qemu/roms/u-boot/fs/ext4/ext4_common.h
new file mode 100644
index 000000000..5fa1719f2
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/ext4_common.h
@@ -0,0 +1,78 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * ext4ls and ext4load : based on ext2 ls load support in Uboot.
+ *
+ * (C) Copyright 2004
+ * esd gmbh <www.esd-electronics.com>
+ * Reinhard Arlt <reinhard.arlt@esd-electronics.com>
+ *
+ * based on code from grub2 fs/ext2.c and fs/fshelp.c by
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ *
+ * ext4write : Based on generic ext4 protocol.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __EXT4_COMMON__
+#define __EXT4_COMMON__
+#include <ext_common.h>
+#include <ext4fs.h>
+#include <malloc.h>
+#include <asm/errno.h>
+#if defined(CONFIG_EXT4_WRITE)
+#include "ext4_journal.h"
+#include "crc16.h"
+#endif
+
+#define YES 1
+#define NO 0
+#define RECOVER 1
+#define SCAN 0
+
+#define S_IFLNK 0120000 /* symbolic link */
+#define BLOCK_NO_ONE 1
+#define SUPERBLOCK_START (2 * 512)
+#define SUPERBLOCK_SIZE 1024
+#define F_FILE 1
+
+static inline void *zalloc(size_t size)
+{
+ void *p = memalign(ARCH_DMA_MINALIGN, size);
+
+ /* memalign() can return NULL; avoid memset() on a NULL pointer */
+ if (p)
+ memset(p, 0, size);
+ return p;
+}
+
+int ext4fs_read_inode(struct ext2_data *data, int ino,
+ struct ext2_inode *inode);
+int ext4fs_read_file(struct ext2fs_node *node, int pos,
+ unsigned int len, char *buf);
+int ext4fs_find_file(const char *path, struct ext2fs_node *rootnode,
+ struct ext2fs_node **foundnode, int expecttype);
+int ext4fs_iterate_dir(struct ext2fs_node *dir, char *name,
+ struct ext2fs_node **fnode, int *ftype);
+
+#if defined(CONFIG_EXT4_WRITE)
+uint32_t ext4fs_div_roundup(uint32_t size, uint32_t n);
+int ext4fs_checksum_update(unsigned int i);
+int ext4fs_get_parent_inode_num(const char *dirname, char *dname, int flags);
+void ext4fs_update_parent_dentry(char *filename, int *p_ino, int file_type);
+long int ext4fs_get_new_blk_no(void);
+int ext4fs_get_new_inode_no(void);
+void ext4fs_reset_block_bmap(long int blockno, unsigned char *buffer,
+ int index);
+int ext4fs_set_block_bmap(long int blockno, unsigned char *buffer, int index);
+int ext4fs_set_inode_bmap(int inode_no, unsigned char *buffer, int index);
+void ext4fs_reset_inode_bmap(int inode_no, unsigned char *buffer, int index);
+int ext4fs_iget(int inode_no, struct ext2_inode *inode);
+void ext4fs_allocate_blocks(struct ext2_inode *file_inode,
+ unsigned int total_remaining_blocks,
+ unsigned int *total_no_of_block);
+void put_ext4(uint64_t off, void *buf, uint32_t size);
+#endif
+#endif
diff --git a/qemu/roms/u-boot/fs/ext4/ext4_journal.c b/qemu/roms/u-boot/fs/ext4/ext4_journal.c
new file mode 100644
index 000000000..3f613351a
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/ext4_journal.c
@@ -0,0 +1,653 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * Journal data structures and headers for Journaling feature of ext4
+ * have been referred from JBD2 (Journaling Block device 2)
+ * implementation in Linux Kernel.
+ * Written by Stephen C. Tweedie <sct@redhat.com>
+ *
+ * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <ext4fs.h>
+#include <malloc.h>
+#include <ext_common.h>
+#include "ext4_common.h"
+
+static struct revoke_blk_list *revk_blk_list;
+static struct revoke_blk_list *prev_node;
+static int first_node = true;
+
+int gindex;
+int gd_index;
+int jrnl_blk_idx;
+struct journal_log *journal_ptr[MAX_JOURNAL_ENTRIES];
+struct dirty_blocks *dirty_block_ptr[MAX_JOURNAL_ENTRIES];
+
+int ext4fs_init_journal(void)
+{
+ int i;
+ char *temp = NULL;
+ struct ext_filesystem *fs = get_fs();
+
+ /* init globals */
+ revk_blk_list = NULL;
+ prev_node = NULL;
+ gindex = 0;
+ gd_index = 0;
+ jrnl_blk_idx = 1;
+
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ journal_ptr[i] = zalloc(sizeof(struct journal_log));
+ if (!journal_ptr[i])
+ goto fail;
+ dirty_block_ptr[i] = zalloc(sizeof(struct dirty_blocks));
+ if (!dirty_block_ptr[i])
+ goto fail;
+ journal_ptr[i]->buf = NULL;
+ journal_ptr[i]->blknr = -1;
+
+ dirty_block_ptr[i]->buf = NULL;
+ dirty_block_ptr[i]->blknr = -1;
+ }
+
+ if (fs->blksz == 4096) {
+ temp = zalloc(fs->blksz);
+ if (!temp)
+ goto fail;
+ journal_ptr[gindex]->buf = zalloc(fs->blksz);
+ if (!journal_ptr[gindex]->buf)
+ goto fail;
+ ext4fs_devread(0, 0, fs->blksz, temp);
+ memcpy(temp + SUPERBLOCK_SIZE, fs->sb, SUPERBLOCK_SIZE);
+ memcpy(journal_ptr[gindex]->buf, temp, fs->blksz);
+ journal_ptr[gindex++]->blknr = 0;
+ free(temp);
+ } else {
+ journal_ptr[gindex]->buf = zalloc(fs->blksz);
+ if (!journal_ptr[gindex]->buf)
+ goto fail;
+ memcpy(journal_ptr[gindex]->buf, fs->sb, SUPERBLOCK_SIZE);
+ journal_ptr[gindex++]->blknr = 1;
+ }
+
+ /* Check the file system state using journal super block */
+ if (ext4fs_check_journal_state(SCAN))
+ goto fail;
+ /* Recover the file system, if required, using the journal super block */
+ if (ext4fs_check_journal_state(RECOVER))
+ goto fail;
+
+ return 0;
+fail:
+ return -1;
+}
+
+void ext4fs_dump_metadata(void)
+{
+ struct ext_filesystem *fs = get_fs();
+ int i;
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (dirty_block_ptr[i]->blknr == -1)
+ break;
+ put_ext4((uint64_t) ((uint64_t)dirty_block_ptr[i]->blknr *
+ (uint64_t)fs->blksz), dirty_block_ptr[i]->buf,
+ fs->blksz);
+ }
+}
+
+void ext4fs_free_journal(void)
+{
+ int i;
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (dirty_block_ptr[i]->blknr == -1)
+ break;
+ if (dirty_block_ptr[i]->buf)
+ free(dirty_block_ptr[i]->buf);
+ }
+
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (journal_ptr[i]->blknr == -1)
+ break;
+ if (journal_ptr[i]->buf)
+ free(journal_ptr[i]->buf);
+ }
+
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (journal_ptr[i])
+ free(journal_ptr[i]);
+ if (dirty_block_ptr[i])
+ free(dirty_block_ptr[i]);
+ }
+ gindex = 0;
+ gd_index = 0;
+ jrnl_blk_idx = 1;
+}
+
+int ext4fs_log_gdt(char *gd_table)
+{
+ struct ext_filesystem *fs = get_fs();
+ short i;
+ long int var = fs->gdtable_blkno;
+ for (i = 0; i < fs->no_blk_pergdt; i++) {
+ journal_ptr[gindex]->buf = zalloc(fs->blksz);
+ if (!journal_ptr[gindex]->buf)
+ return -ENOMEM;
+ memcpy(journal_ptr[gindex]->buf, gd_table, fs->blksz);
+ gd_table += fs->blksz;
+ journal_ptr[gindex++]->blknr = var++;
+ }
+
+ return 0;
+}
+
+/*
+ * This function stores the backup copy of meta data in RAM
+ * journal_buffer -- Buffer containing meta data
+ * blknr -- Block number on disk of the meta data buffer
+ */
+int ext4fs_log_journal(char *journal_buffer, long int blknr)
+{
+ struct ext_filesystem *fs = get_fs();
+ short i;
+
+ if (!journal_buffer) {
+ printf("Invalid input arguments %s\n", __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (journal_ptr[i]->blknr == -1)
+ break;
+ if (journal_ptr[i]->blknr == blknr)
+ return 0;
+ }
+
+ journal_ptr[gindex]->buf = zalloc(fs->blksz);
+ if (!journal_ptr[gindex]->buf)
+ return -ENOMEM;
+
+ memcpy(journal_ptr[gindex]->buf, journal_buffer, fs->blksz);
+ journal_ptr[gindex++]->blknr = blknr;
+
+ return 0;
+}
+
+/*
+ * This function stores the modified meta data in RAM
+ * metadata_buffer -- Buffer containing meta data
+ * blknr -- Block number on disk of the meta data buffer
+ */
+int ext4fs_put_metadata(char *metadata_buffer, long int blknr)
+{
+ struct ext_filesystem *fs = get_fs();
+ if (!metadata_buffer) {
+ printf("Invalid input arguments %s\n", __func__);
+ return -EINVAL;
+ }
+ dirty_block_ptr[gd_index]->buf = zalloc(fs->blksz);
+ if (!dirty_block_ptr[gd_index]->buf)
+ return -ENOMEM;
+ memcpy(dirty_block_ptr[gd_index]->buf, metadata_buffer, fs->blksz);
+ dirty_block_ptr[gd_index++]->blknr = blknr;
+
+ return 0;
+}
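+/*
+ * Rough sketch of the intended write path for the two helpers above:
+ * read a metadata block with ext4fs_devread(), save the original copy
+ * with ext4fs_log_journal(), modify the buffer, queue the new version
+ * with ext4fs_put_metadata(), and at commit time write the journal via
+ * ext4fs_update_journal() (descriptor block, saved buffers, commit
+ * block) and the dirty copies via ext4fs_dump_metadata().
+ */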
+
+void print_revoke_blks(char *revk_blk)
+{
+ int offset;
+ int max;
+ long int blocknr;
+ struct journal_revoke_header_t *header;
+
+ if (revk_blk == NULL)
+ return;
+
+ header = (struct journal_revoke_header_t *) revk_blk;
+ offset = sizeof(struct journal_revoke_header_t);
+ max = be32_to_cpu(header->r_count);
+ printf("total bytes %d\n", max);
+
+ while (offset < max) {
+ blocknr = be32_to_cpu(*((long int *)(revk_blk + offset)));
+ printf("revoke blknr is %ld\n", blocknr);
+ offset += 4;
+ }
+}
+
+static struct revoke_blk_list *_get_node(void)
+{
+ struct revoke_blk_list *tmp_node;
+ tmp_node = zalloc(sizeof(struct revoke_blk_list));
+ if (tmp_node == NULL)
+ return NULL;
+ tmp_node->content = NULL;
+ tmp_node->next = NULL;
+
+ return tmp_node;
+}
+
+void ext4fs_push_revoke_blk(char *buffer)
+{
+ struct revoke_blk_list *node = NULL;
+ struct ext_filesystem *fs = get_fs();
+ if (buffer == NULL) {
+ printf("buffer ptr is NULL\n");
+ return;
+ }
+ node = _get_node();
+ if (!node) {
+ printf("_get_node: malloc failed\n");
+ return;
+ }
+
+ node->content = zalloc(fs->blksz);
+ if (node->content == NULL)
+ return;
+ memcpy(node->content, buffer, fs->blksz);
+
+ if (first_node == true) {
+ revk_blk_list = node;
+ prev_node = node;
+ first_node = false;
+ } else {
+ prev_node->next = node;
+ prev_node = node;
+ }
+}
+
+void ext4fs_free_revoke_blks(void)
+{
+ struct revoke_blk_list *tmp_node = revk_blk_list;
+ struct revoke_blk_list *next_node = NULL;
+
+ while (tmp_node != NULL) {
+ if (tmp_node->content)
+ free(tmp_node->content);
+ tmp_node = tmp_node->next;
+ }
+
+ tmp_node = revk_blk_list;
+ while (tmp_node != NULL) {
+ next_node = tmp_node->next;
+ free(tmp_node);
+ tmp_node = next_node;
+ }
+
+ revk_blk_list = NULL;
+ prev_node = NULL;
+ first_node = true;
+}
+
+int check_blknr_for_revoke(long int blknr, int sequence_no)
+{
+ struct journal_revoke_header_t *header;
+ int offset;
+ int max;
+ long int blocknr;
+ char *revk_blk;
+ struct revoke_blk_list *tmp_revk_node = revk_blk_list;
+ while (tmp_revk_node != NULL) {
+ revk_blk = tmp_revk_node->content;
+
+ header = (struct journal_revoke_header_t *) revk_blk;
+ if (sequence_no < be32_to_cpu(header->r_header.h_sequence)) {
+ offset = sizeof(struct journal_revoke_header_t);
+ max = be32_to_cpu(header->r_count);
+
+ while (offset < max) {
+ blocknr = be32_to_cpu(*((long int *)
+ (revk_blk + offset)));
+ if (blocknr == blknr)
+ goto found;
+ offset += 4;
+ }
+ }
+ tmp_revk_node = tmp_revk_node->next;
+ }
+
+ return -1;
+
+found:
+ return 0;
+}
+
+/*
+ * This function parses the journal blocks and replays the
+ * successful transactions. A transaction is successful
+ * if a commit block is found for its descriptor block.
+ * The tags in the descriptor block contain the disk block
+ * numbers of the metadata to be replayed.
+ */
+void recover_transaction(int prev_desc_logical_no)
+{
+ struct ext2_inode inode_journal;
+ struct ext_filesystem *fs = get_fs();
+ struct journal_header_t *jdb;
+ long int blknr;
+ char *p_jdb;
+ int ofs, flags;
+ int i;
+ struct ext3_journal_block_tag *tag;
+ char *temp_buff = zalloc(fs->blksz);
+ char *metadata_buff = zalloc(fs->blksz);
+ if (!temp_buff || !metadata_buff)
+ goto fail;
+ i = prev_desc_logical_no;
+ ext4fs_read_inode(ext4fs_root, EXT2_JOURNAL_INO,
+ (struct ext2_inode *)&inode_journal);
+ blknr = read_allocated_block((struct ext2_inode *)
+ &inode_journal, i);
+ ext4fs_devread((lbaint_t)blknr * fs->sect_perblk, 0, fs->blksz,
+ temp_buff);
+ p_jdb = (char *)temp_buff;
+ jdb = (struct journal_header_t *) temp_buff;
+ ofs = sizeof(struct journal_header_t);
+
+ do {
+ tag = (struct ext3_journal_block_tag *)&p_jdb[ofs];
+ ofs += sizeof(struct ext3_journal_block_tag);
+
+ if (ofs > fs->blksz)
+ break;
+
+ flags = be32_to_cpu(tag->flags);
+ if (!(flags & EXT3_JOURNAL_FLAG_SAME_UUID))
+ ofs += 16;
+
+ i++;
+ debug("\t\ttag %u\n", be32_to_cpu(tag->block));
+ if (revk_blk_list != NULL) {
+ if (check_blknr_for_revoke(be32_to_cpu(tag->block),
+ be32_to_cpu(jdb->h_sequence)) == 0)
+ continue;
+ }
+ blknr = read_allocated_block(&inode_journal, i);
+ ext4fs_devread((lbaint_t)blknr * fs->sect_perblk, 0,
+ fs->blksz, metadata_buff);
+ put_ext4((uint64_t)((uint64_t)be32_to_cpu(tag->block) * (uint64_t)fs->blksz),
+ metadata_buff, (uint32_t) fs->blksz);
+ } while (!(flags & EXT3_JOURNAL_FLAG_LAST_TAG));
+fail:
+ free(temp_buff);
+ free(metadata_buff);
+}
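+/*
+ * Journal layout assumed by the replay above (illustrative):
+ *
+ *   [ descriptor block: tag0, tag1, ... ]
+ *   [ journaled copy of block tag0.block ]
+ *   [ journaled copy of block tag1.block ]
+ *   [ ... ]
+ *   [ commit block ]
+ *
+ * Each tag names the home block number; replay copies the following
+ * journal block back to that location unless a newer revoke record
+ * (see check_blknr_for_revoke()) covers it.
+ */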
+
+void print_jrnl_status(int recovery_flag)
+{
+ if (recovery_flag == RECOVER)
+ printf("Journal Recovery Completed\n");
+ else
+ printf("Journal Scan Completed\n");
+}
+
+int ext4fs_check_journal_state(int recovery_flag)
+{
+ int i;
+ int DB_FOUND = NO;
+ long int blknr;
+ int transaction_state = TRANSACTION_COMPLETE;
+ int prev_desc_logical_no = 0;
+ int curr_desc_logical_no = 0;
+ int ofs, flags;
+ struct ext2_inode inode_journal;
+ struct journal_superblock_t *jsb = NULL;
+ struct journal_header_t *jdb = NULL;
+ char *p_jdb = NULL;
+ struct ext3_journal_block_tag *tag = NULL;
+ char *temp_buff = NULL;
+ char *temp_buff1 = NULL;
+ struct ext_filesystem *fs = get_fs();
+
+ temp_buff = zalloc(fs->blksz);
+ if (!temp_buff)
+ return -ENOMEM;
+ temp_buff1 = zalloc(fs->blksz);
+ if (!temp_buff1) {
+ free(temp_buff);
+ return -ENOMEM;
+ }
+
+ ext4fs_read_inode(ext4fs_root, EXT2_JOURNAL_INO, &inode_journal);
+ blknr = read_allocated_block(&inode_journal, EXT2_JOURNAL_SUPERBLOCK);
+ ext4fs_devread((lbaint_t)blknr * fs->sect_perblk, 0, fs->blksz,
+ temp_buff);
+ jsb = (struct journal_superblock_t *) temp_buff;
+
+ if (fs->sb->feature_incompat & EXT3_FEATURE_INCOMPAT_RECOVER) {
+ if (recovery_flag == RECOVER)
+ printf("Recovery required\n");
+ } else {
+ if (recovery_flag == RECOVER)
+ printf("File System is consistent\n");
+ goto end;
+ }
+
+ if (be32_to_cpu(jsb->s_start) == 0)
+ goto end;
+
+ if (!(jsb->s_feature_compat &
+ cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM)))
+ jsb->s_feature_compat |=
+ cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
+
+ i = be32_to_cpu(jsb->s_first);
+ while (1) {
+ blknr = read_allocated_block(&inode_journal, i);
+ memset(temp_buff1, '\0', fs->blksz);
+ ext4fs_devread((lbaint_t)blknr * fs->sect_perblk,
+ 0, fs->blksz, temp_buff1);
+ jdb = (struct journal_header_t *) temp_buff1;
+
+ if (be32_to_cpu(jdb->h_blocktype) ==
+ EXT3_JOURNAL_DESCRIPTOR_BLOCK) {
+ if (be32_to_cpu(jdb->h_sequence) !=
+ be32_to_cpu(jsb->s_sequence)) {
+ print_jrnl_status(recovery_flag);
+ break;
+ }
+
+ curr_desc_logical_no = i;
+ if (transaction_state == TRANSACTION_COMPLETE)
+ transaction_state = TRANSACTION_RUNNING;
+ else
+ return -1;
+ p_jdb = (char *)temp_buff1;
+ ofs = sizeof(struct journal_header_t);
+ do {
+ tag = (struct ext3_journal_block_tag *)
+ &p_jdb[ofs];
+ ofs += sizeof(struct ext3_journal_block_tag);
+ if (ofs > fs->blksz)
+ break;
+ flags = be32_to_cpu(tag->flags);
+ if (!(flags & EXT3_JOURNAL_FLAG_SAME_UUID))
+ ofs += 16;
+ i++;
+ debug("\t\ttag %u\n", be32_to_cpu(tag->block));
+ } while (!(flags & EXT3_JOURNAL_FLAG_LAST_TAG));
+ i++;
+ DB_FOUND = YES;
+ } else if (be32_to_cpu(jdb->h_blocktype) ==
+ EXT3_JOURNAL_COMMIT_BLOCK) {
+ if (be32_to_cpu(jdb->h_sequence) !=
+ be32_to_cpu(jsb->s_sequence)) {
+ print_jrnl_status(recovery_flag);
+ break;
+ }
+
+ if (transaction_state == TRANSACTION_RUNNING ||
+ (DB_FOUND == NO)) {
+ transaction_state = TRANSACTION_COMPLETE;
+ i++;
+ jsb->s_sequence =
+ cpu_to_be32(be32_to_cpu(
+ jsb->s_sequence) + 1);
+ }
+ prev_desc_logical_no = curr_desc_logical_no;
+ if ((recovery_flag == RECOVER) && (DB_FOUND == YES))
+ recover_transaction(prev_desc_logical_no);
+
+ DB_FOUND = NO;
+ } else if (be32_to_cpu(jdb->h_blocktype) ==
+ EXT3_JOURNAL_REVOKE_BLOCK) {
+ if (be32_to_cpu(jdb->h_sequence) !=
+ be32_to_cpu(jsb->s_sequence)) {
+ print_jrnl_status(recovery_flag);
+ break;
+ }
+ if (recovery_flag == SCAN)
+ ext4fs_push_revoke_blk((char *)jdb);
+ i++;
+ } else {
+ debug("Else Case\n");
+ if (be32_to_cpu(jdb->h_sequence) !=
+ be32_to_cpu(jsb->s_sequence)) {
+ print_jrnl_status(recovery_flag);
+ break;
+ }
+ }
+ }
+
+end:
+ if (recovery_flag == RECOVER) {
+ jsb->s_start = cpu_to_be32(1);
+ jsb->s_sequence = cpu_to_be32(be32_to_cpu(jsb->s_sequence) + 1);
+ /* get the superblock */
+ ext4_read_superblock((char *)fs->sb);
+ fs->sb->feature_incompat |= EXT3_FEATURE_INCOMPAT_RECOVER;
+
+ /* Update the super block */
+ put_ext4((uint64_t) (SUPERBLOCK_SIZE),
+ (struct ext2_sblock *)fs->sb,
+ (uint32_t) SUPERBLOCK_SIZE);
+ ext4_read_superblock((char *)fs->sb);
+
+ blknr = read_allocated_block(&inode_journal,
+ EXT2_JOURNAL_SUPERBLOCK);
+ put_ext4((uint64_t) ((uint64_t)blknr * (uint64_t)fs->blksz),
+ (struct journal_superblock_t *)temp_buff,
+ (uint32_t) fs->blksz);
+ ext4fs_free_revoke_blks();
+ }
+ free(temp_buff);
+ free(temp_buff1);
+
+ return 0;
+}
+
+static void update_descriptor_block(long int blknr)
+{
+ int i;
+ long int jsb_blknr;
+ struct journal_header_t jdb;
+ struct ext3_journal_block_tag tag;
+ struct ext2_inode inode_journal;
+ struct journal_superblock_t *jsb = NULL;
+ char *buf = NULL;
+ char *temp = NULL;
+ struct ext_filesystem *fs = get_fs();
+ char *temp_buff = zalloc(fs->blksz);
+ if (!temp_buff)
+ return;
+
+ ext4fs_read_inode(ext4fs_root, EXT2_JOURNAL_INO, &inode_journal);
+ jsb_blknr = read_allocated_block(&inode_journal,
+ EXT2_JOURNAL_SUPERBLOCK);
+ ext4fs_devread((lbaint_t)jsb_blknr * fs->sect_perblk, 0, fs->blksz,
+ temp_buff);
+ jsb = (struct journal_superblock_t *) temp_buff;
+
+ jdb.h_blocktype = cpu_to_be32(EXT3_JOURNAL_DESCRIPTOR_BLOCK);
+ jdb.h_magic = cpu_to_be32(EXT3_JOURNAL_MAGIC_NUMBER);
+ jdb.h_sequence = jsb->s_sequence;
+ buf = zalloc(fs->blksz);
+ if (!buf) {
+ free(temp_buff);
+ return;
+ }
+ temp = buf;
+ memcpy(buf, &jdb, sizeof(struct journal_header_t));
+ temp += sizeof(struct journal_header_t);
+
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (journal_ptr[i]->blknr == -1)
+ break;
+
+ tag.block = cpu_to_be32(journal_ptr[i]->blknr);
+ tag.flags = cpu_to_be32(EXT3_JOURNAL_FLAG_SAME_UUID);
+ memcpy(temp, &tag, sizeof(struct ext3_journal_block_tag));
+ temp = temp + sizeof(struct ext3_journal_block_tag);
+ }
+
+ tag.block = cpu_to_be32(journal_ptr[--i]->blknr);
+ tag.flags = cpu_to_be32(EXT3_JOURNAL_FLAG_LAST_TAG);
+ memcpy(temp - sizeof(struct ext3_journal_block_tag), &tag,
+ sizeof(struct ext3_journal_block_tag));
+ put_ext4((uint64_t) ((uint64_t)blknr * (uint64_t)fs->blksz), buf, (uint32_t) fs->blksz);
+
+ free(temp_buff);
+ free(buf);
+}
+
+static void update_commit_block(long int blknr)
+{
+ struct journal_header_t jdb;
+ struct ext_filesystem *fs = get_fs();
+ char *buf = NULL;
+ struct ext2_inode inode_journal;
+ struct journal_superblock_t *jsb;
+ long int jsb_blknr;
+ char *temp_buff = zalloc(fs->blksz);
+ if (!temp_buff)
+ return;
+
+ ext4fs_read_inode(ext4fs_root, EXT2_JOURNAL_INO,
+ &inode_journal);
+ jsb_blknr = read_allocated_block(&inode_journal,
+ EXT2_JOURNAL_SUPERBLOCK);
+ ext4fs_devread((lbaint_t)jsb_blknr * fs->sect_perblk, 0, fs->blksz,
+ temp_buff);
+ jsb = (struct journal_superblock_t *) temp_buff;
+
+ jdb.h_blocktype = cpu_to_be32(EXT3_JOURNAL_COMMIT_BLOCK);
+ jdb.h_magic = cpu_to_be32(EXT3_JOURNAL_MAGIC_NUMBER);
+ jdb.h_sequence = jsb->s_sequence;
+ buf = zalloc(fs->blksz);
+ if (!buf) {
+ free(temp_buff);
+ return;
+ }
+ memcpy(buf, &jdb, sizeof(struct journal_header_t));
+ put_ext4((uint64_t) ((uint64_t)blknr * (uint64_t)fs->blksz), buf, (uint32_t) fs->blksz);
+
+ free(temp_buff);
+ free(buf);
+}
+
+void ext4fs_update_journal(void)
+{
+ struct ext2_inode inode_journal;
+ struct ext_filesystem *fs = get_fs();
+ long int blknr;
+ int i;
+ ext4fs_read_inode(ext4fs_root, EXT2_JOURNAL_INO, &inode_journal);
+ blknr = read_allocated_block(&inode_journal, jrnl_blk_idx++);
+ update_descriptor_block(blknr);
+ for (i = 0; i < MAX_JOURNAL_ENTRIES; i++) {
+ if (journal_ptr[i]->blknr == -1)
+ break;
+ blknr = read_allocated_block(&inode_journal, jrnl_blk_idx++);
+ put_ext4((uint64_t) ((uint64_t)blknr * (uint64_t)fs->blksz),
+ journal_ptr[i]->buf, fs->blksz);
+ }
+ blknr = read_allocated_block(&inode_journal, jrnl_blk_idx++);
+ update_commit_block(blknr);
+ printf("update journal finished\n");
+}
diff --git a/qemu/roms/u-boot/fs/ext4/ext4_journal.h b/qemu/roms/u-boot/fs/ext4/ext4_journal.h
new file mode 100644
index 000000000..d926094be
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/ext4_journal.h
@@ -0,0 +1,125 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * Journal data structures and headers for Journaling feature of ext4
+ * have been referred from JBD2 (Journaling Block device 2)
+ * implementation in Linux Kernel.
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>
+ *
+ * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#ifndef __EXT4_JRNL__
+#define __EXT4_JRNL__
+
+#define EXT2_JOURNAL_INO 8 /* Journal inode */
+#define EXT2_JOURNAL_SUPERBLOCK 0 /* Journal Superblock number */
+
+#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
+#define EXT3_JOURNAL_MAGIC_NUMBER 0xc03b3998U
+#define TRANSACTION_RUNNING 1
+#define TRANSACTION_COMPLETE 0
+#define EXT3_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
+#define EXT3_JOURNAL_DESCRIPTOR_BLOCK 1
+#define EXT3_JOURNAL_COMMIT_BLOCK 2
+#define EXT3_JOURNAL_SUPERBLOCK_V1 3
+#define EXT3_JOURNAL_SUPERBLOCK_V2 4
+#define EXT3_JOURNAL_REVOKE_BLOCK 5
+#define EXT3_JOURNAL_FLAG_ESCAPE 1
+#define EXT3_JOURNAL_FLAG_SAME_UUID 2
+#define EXT3_JOURNAL_FLAG_DELETED 4
+#define EXT3_JOURNAL_FLAG_LAST_TAG 8
+
+/* Maximum entries in 1 journal transaction */
+#define MAX_JOURNAL_ENTRIES 100
+struct journal_log {
+ char *buf;
+ int blknr;
+};
+
+struct dirty_blocks {
+ char *buf;
+ int blknr;
+};
+
+/* Standard header for all descriptor blocks: */
+struct journal_header_t {
+ __u32 h_magic;
+ __u32 h_blocktype;
+ __u32 h_sequence;
+};
+
+/* The journal superblock. All fields are in big-endian byte order. */
+struct journal_superblock_t {
+ /* 0x0000 */
+ struct journal_header_t s_header;
+
+ /* Static information describing the journal */
+ __u32 s_blocksize; /* journal device blocksize */
+ __u32 s_maxlen; /* total blocks in journal file */
+ __u32 s_first; /* first block of log information */
+
+ /* Dynamic information describing the current state of the log */
+ __u32 s_sequence; /* first commit ID expected in log */
+ __u32 s_start; /* blocknr of start of log */
+
+ /* Error value, as set by journal_abort(). */
+ __s32 s_errno;
+
+ /* Remaining fields are only valid in a version-2 superblock */
+ __u32 s_feature_compat; /* compatible feature set */
+ __u32 s_feature_incompat; /* incompatible feature set */
+ __u32 s_feature_ro_compat; /* readonly-compatible feature set */
+ /* 0x0030 */
+ __u8 s_uuid[16]; /* 128-bit uuid for journal */
+
+ /* 0x0040 */
+ __u32 s_nr_users; /* Nr of filesystems sharing log */
+
+ __u32 s_dynsuper; /* Blocknr of dynamic superblock copy */
+
+ /* 0x0048 */
+ __u32 s_max_transaction; /* Limit of journal blocks per trans. */
+ __u32 s_max_trans_data; /* Limit of data blocks per trans. */
+
+ /* 0x0050 */
+ __u32 s_padding[44];
+
+ /* 0x0100 */
+ __u8 s_users[16 * 48]; /* ids of all fs'es sharing the log */
+ /* 0x0400 */
+};
+
+struct ext3_journal_block_tag {
+ uint32_t block;
+ uint32_t flags;
+};
+
+struct journal_revoke_header_t {
+ struct journal_header_t r_header;
+ int r_count; /* Count of bytes used in the block */
+};
+
+struct revoke_blk_list {
+ char *content; /* revoke block itself */
+ struct revoke_blk_list *next;
+};
+
+extern struct ext2_data *ext4fs_root;
+
+int ext4fs_init_journal(void);
+int ext4fs_log_gdt(char *gd_table);
+int ext4fs_check_journal_state(int recovery_flag);
+int ext4fs_log_journal(char *journal_buffer, long int blknr);
+int ext4fs_put_metadata(char *metadata_buffer, long int blknr);
+void ext4fs_update_journal(void);
+void ext4fs_dump_metadata(void);
+void ext4fs_push_revoke_blk(char *buffer);
+void ext4fs_free_journal(void);
+void ext4fs_free_revoke_blks(void);
+#endif
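
Editor's note: the journal code above fills a commit header by copying jsb->s_sequence straight from the on-disk journal superblock while converting its own constants with cpu_to_be32(); that works because every field of struct journal_superblock_t is stored big-endian. Whenever such a field is needed in host order it has to be swapped first. A minimal sketch, assuming U-Boot's usual be32_to_cpu() from <asm/byteorder.h>; the helper name journal_next_sequence is hypothetical and not part of the patch:

#include <asm/byteorder.h>

/* Hypothetical helper, not part of the patch: return the next expected
 * commit ID in CPU byte order.  Journal superblock fields are stored
 * big-endian on disk, so they must be swapped before any arithmetic. */
static __u32 journal_next_sequence(const struct journal_superblock_t *jsb)
{
	return be32_to_cpu(jsb->s_sequence);
}
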
diff --git a/qemu/roms/u-boot/fs/ext4/ext4_write.c b/qemu/roms/u-boot/fs/ext4/ext4_write.c
new file mode 100644
index 000000000..c42add9a7
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/ext4_write.c
@@ -0,0 +1,977 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * ext4ls and ext4load : Based on ext2 ls and load support in Uboot.
+ * Ext4 read optimization taken from Open-Moko
+ * Qi bootloader
+ *
+ * (C) Copyright 2004
+ * esd gmbh <www.esd-electronics.com>
+ * Reinhard Arlt <reinhard.arlt@esd-electronics.com>
+ *
+ * based on code from grub2 fs/ext2.c and fs/fshelp.c by
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ *
+ * ext4write : Based on generic ext4 protocol.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+
+#include <common.h>
+#include <linux/stat.h>
+#include <div64.h>
+#include "ext4_common.h"
+
+static void ext4fs_update(void)
+{
+ short i;
+ ext4fs_update_journal();
+ struct ext_filesystem *fs = get_fs();
+
+ /* update super block */
+ put_ext4((uint64_t)(SUPERBLOCK_SIZE),
+ (struct ext2_sblock *)fs->sb, (uint32_t)SUPERBLOCK_SIZE);
+
+ /* update block groups */
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ fs->bgd[i].bg_checksum = ext4fs_checksum_update(i);
+ put_ext4((uint64_t)((uint64_t)fs->bgd[i].block_id * (uint64_t)fs->blksz),
+ fs->blk_bmaps[i], fs->blksz);
+ }
+
+ /* update inode table groups */
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ put_ext4((uint64_t) ((uint64_t)fs->bgd[i].inode_id * (uint64_t)fs->blksz),
+ fs->inode_bmaps[i], fs->blksz);
+ }
+
+ /* update the block group descriptor table */
+ put_ext4((uint64_t)((uint64_t)fs->gdtable_blkno * (uint64_t)fs->blksz),
+ (struct ext2_block_group *)fs->gdtable,
+ (fs->blksz * fs->no_blk_pergdt));
+
+ ext4fs_dump_metadata();
+
+ gindex = 0;
+ gd_index = 0;
+}
+
+int ext4fs_get_bgdtable(void)
+{
+ int status;
+ int grp_desc_size;
+ struct ext_filesystem *fs = get_fs();
+ grp_desc_size = sizeof(struct ext2_block_group);
+ fs->no_blk_pergdt = (fs->no_blkgrp * grp_desc_size) / fs->blksz;
+ if ((fs->no_blkgrp * grp_desc_size) % fs->blksz)
+ fs->no_blk_pergdt++;
+
+ /* allocate memory for gdtable */
+ fs->gdtable = zalloc(fs->blksz * fs->no_blk_pergdt);
+ if (!fs->gdtable)
+ return -ENOMEM;
+ /* read the group descriptor table */
+ status = ext4fs_devread((lbaint_t)fs->gdtable_blkno * fs->sect_perblk,
+ 0, fs->blksz * fs->no_blk_pergdt, fs->gdtable);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_gdt(fs->gdtable)) {
+ printf("Error in ext4fs_log_gdt\n");
+ return -1;
+ }
+
+ return 0;
+fail:
+ free(fs->gdtable);
+ fs->gdtable = NULL;
+
+ return -1;
+}
+
+static void delete_single_indirect_block(struct ext2_inode *inode)
+{
+ struct ext2_block_group *bgd = NULL;
+ static int prev_bg_bmap_idx = -1;
+ long int blknr;
+ int remainder;
+ int bg_idx;
+ int status;
+ unsigned int blk_per_grp = ext4fs_root->sblock.blocks_per_group;
+ struct ext_filesystem *fs = get_fs();
+ char *journal_buffer = zalloc(fs->blksz);
+ if (!journal_buffer) {
+ printf("No memory\n");
+ return;
+ }
+ /* get block group descriptor table */
+ bgd = (struct ext2_block_group *)fs->gdtable;
+
+ /* deleting the single indirect block associated with inode */
+ if (inode->b.blocks.indir_block != 0) {
+ debug("SIPB releasing %u\n", inode->b.blocks.indir_block);
+ blknr = inode->b.blocks.indir_block;
+ bg_idx = blknr / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = blknr % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(blknr, fs->blk_bmaps[bg_idx], bg_idx);
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ status =
+ ext4fs_devread((lbaint_t)bgd[bg_idx].block_id *
+ fs->sect_perblk, 0, fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal
+ (journal_buffer, bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ }
+fail:
+ free(journal_buffer);
+}
+
+static void delete_double_indirect_block(struct ext2_inode *inode)
+{
+ int i;
+ short status;
+ static int prev_bg_bmap_idx = -1;
+ long int blknr;
+ int remainder;
+ int bg_idx;
+ unsigned int blk_per_grp = ext4fs_root->sblock.blocks_per_group;
+ unsigned int *di_buffer = NULL;
+ unsigned int *DIB_start_addr = NULL;
+ struct ext2_block_group *bgd = NULL;
+ struct ext_filesystem *fs = get_fs();
+ char *journal_buffer = zalloc(fs->blksz);
+ if (!journal_buffer) {
+ printf("No memory\n");
+ return;
+ }
+ /* get the block group descriptor table */
+ bgd = (struct ext2_block_group *)fs->gdtable;
+
+ if (inode->b.blocks.double_indir_block != 0) {
+ di_buffer = zalloc(fs->blksz);
+ if (!di_buffer) {
+ printf("No memory\n");
+ return;
+ }
+ DIB_start_addr = (unsigned int *)di_buffer;
+ blknr = inode->b.blocks.double_indir_block;
+ status = ext4fs_devread((lbaint_t)blknr * fs->sect_perblk, 0,
+ fs->blksz, (char *)di_buffer);
+ for (i = 0; i < fs->blksz / sizeof(int); i++) {
+ if (*di_buffer == 0)
+ break;
+
+ debug("DICB releasing %u\n", *di_buffer);
+ bg_idx = *di_buffer / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = *di_buffer % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(*di_buffer,
+ fs->blk_bmaps[bg_idx], bg_idx);
+ di_buffer++;
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ status = ext4fs_devread((lbaint_t)
+ bgd[bg_idx].block_id
+ * fs->sect_perblk, 0,
+ fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ }
+
+ /* removing the parent double indirect block */
+ blknr = inode->b.blocks.double_indir_block;
+ bg_idx = blknr / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = blknr % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(blknr, fs->blk_bmaps[bg_idx], bg_idx);
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ memset(journal_buffer, '\0', fs->blksz);
+ status = ext4fs_devread((lbaint_t)bgd[bg_idx].block_id *
+ fs->sect_perblk, 0, fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ debug("DIPB releasing %ld\n", blknr);
+ }
+fail:
+ free(DIB_start_addr);
+ free(journal_buffer);
+}
+
+static void delete_triple_indirect_block(struct ext2_inode *inode)
+{
+ int i, j;
+ short status;
+ static int prev_bg_bmap_idx = -1;
+ long int blknr;
+ int remainder;
+ int bg_idx;
+ unsigned int blk_per_grp = ext4fs_root->sblock.blocks_per_group;
+ unsigned int *tigp_buffer = NULL;
+ unsigned int *tib_start_addr = NULL;
+ unsigned int *tip_buffer = NULL;
+ unsigned int *tipb_start_addr = NULL;
+ struct ext2_block_group *bgd = NULL;
+ struct ext_filesystem *fs = get_fs();
+ char *journal_buffer = zalloc(fs->blksz);
+ if (!journal_buffer) {
+ printf("No memory\n");
+ return;
+ }
+ /* get block group descriptor table */
+ bgd = (struct ext2_block_group *)fs->gdtable;
+
+ if (inode->b.blocks.triple_indir_block != 0) {
+ tigp_buffer = zalloc(fs->blksz);
+ if (!tigp_buffer) {
+ printf("No memory\n");
+ return;
+ }
+ tib_start_addr = (unsigned int *)tigp_buffer;
+ blknr = inode->b.blocks.triple_indir_block;
+ status = ext4fs_devread((lbaint_t)blknr * fs->sect_perblk, 0,
+ fs->blksz, (char *)tigp_buffer);
+ for (i = 0; i < fs->blksz / sizeof(int); i++) {
+ if (*tigp_buffer == 0)
+ break;
+ debug("tigp buffer releasing %u\n", *tigp_buffer);
+
+ tip_buffer = zalloc(fs->blksz);
+ if (!tip_buffer)
+ goto fail;
+ tipb_start_addr = (unsigned int *)tip_buffer;
+ status = ext4fs_devread((lbaint_t)(*tigp_buffer) *
+ fs->sect_perblk, 0, fs->blksz,
+ (char *)tip_buffer);
+ for (j = 0; j < fs->blksz / sizeof(int); j++) {
+ if (*tip_buffer == 0)
+ break;
+ bg_idx = *tip_buffer / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = *tip_buffer % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+
+ ext4fs_reset_block_bmap(*tip_buffer,
+ fs->blk_bmaps[bg_idx],
+ bg_idx);
+
+ tip_buffer++;
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ status =
+ ext4fs_devread(
+ (lbaint_t)
+ bgd[bg_idx].block_id *
+ fs->sect_perblk, 0,
+ fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].
+ block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ }
+ free(tipb_start_addr);
+ tipb_start_addr = NULL;
+
+ /*
+ * removing the grand parent blocks
+ * which is connected to inode
+ */
+ bg_idx = *tigp_buffer / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = *tigp_buffer % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(*tigp_buffer,
+ fs->blk_bmaps[bg_idx], bg_idx);
+
+ tigp_buffer++;
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ memset(journal_buffer, '\0', fs->blksz);
+ status =
+ ext4fs_devread((lbaint_t)
+ bgd[bg_idx].block_id *
+ fs->sect_perblk, 0,
+ fs->blksz, journal_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ }
+
+ /* removing the grand parent triple indirect block */
+ blknr = inode->b.blocks.triple_indir_block;
+ bg_idx = blknr / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = blknr % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(blknr, fs->blk_bmaps[bg_idx], bg_idx);
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ memset(journal_buffer, '\0', fs->blksz);
+ status = ext4fs_devread((lbaint_t)bgd[bg_idx].block_id *
+ fs->sect_perblk, 0, fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ debug("tigp buffer itself releasing %ld\n", blknr);
+ }
+fail:
+ free(tib_start_addr);
+ free(tipb_start_addr);
+ free(journal_buffer);
+}
+
+static int ext4fs_delete_file(int inodeno)
+{
+ struct ext2_inode inode;
+ short status;
+ int i;
+ int remainder;
+ long int blknr;
+ int bg_idx;
+ int ibmap_idx;
+ char *read_buffer = NULL;
+ char *start_block_address = NULL;
+ unsigned int no_blocks;
+
+ static int prev_bg_bmap_idx = -1;
+ unsigned int inodes_per_block;
+ long int blkno;
+ unsigned int blkoff;
+ unsigned int blk_per_grp = ext4fs_root->sblock.blocks_per_group;
+ unsigned int inode_per_grp = ext4fs_root->sblock.inodes_per_group;
+ struct ext2_inode *inode_buffer = NULL;
+ struct ext2_block_group *bgd = NULL;
+ struct ext_filesystem *fs = get_fs();
+ char *journal_buffer = zalloc(fs->blksz);
+ if (!journal_buffer)
+ return -ENOMEM;
+ /* get the block group descriptor table */
+ bgd = (struct ext2_block_group *)fs->gdtable;
+ status = ext4fs_read_inode(ext4fs_root, inodeno, &inode);
+ if (status == 0)
+ goto fail;
+
+ /* read the block no allocated to a file */
+ no_blocks = inode.size / fs->blksz;
+ if (inode.size % fs->blksz)
+ no_blocks++;
+
+ if (le32_to_cpu(inode.flags) & EXT4_EXTENTS_FL) {
+ struct ext2fs_node *node_inode =
+ zalloc(sizeof(struct ext2fs_node));
+ if (!node_inode)
+ goto fail;
+ node_inode->data = ext4fs_root;
+ node_inode->ino = inodeno;
+ node_inode->inode_read = 0;
+ memcpy(&(node_inode->inode), &inode, sizeof(struct ext2_inode));
+
+ for (i = 0; i < no_blocks; i++) {
+ blknr = read_allocated_block(&(node_inode->inode), i);
+ bg_idx = blknr / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = blknr % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(blknr, fs->blk_bmaps[bg_idx],
+ bg_idx);
+ debug("EXT4_EXTENTS Block releasing %ld: %d\n",
+ blknr, bg_idx);
+
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ status =
+ ext4fs_devread((lbaint_t)
+ bgd[bg_idx].block_id *
+ fs->sect_perblk, 0,
+ fs->blksz, journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ }
+ if (node_inode) {
+ free(node_inode);
+ node_inode = NULL;
+ }
+ } else {
+
+ delete_single_indirect_block(&inode);
+ delete_double_indirect_block(&inode);
+ delete_triple_indirect_block(&inode);
+
+ /* read the block no allocated to a file */
+ no_blocks = inode.size / fs->blksz;
+ if (inode.size % fs->blksz)
+ no_blocks++;
+ for (i = 0; i < no_blocks; i++) {
+ blknr = read_allocated_block(&inode, i);
+ bg_idx = blknr / blk_per_grp;
+ if (fs->blksz == 1024) {
+ remainder = blknr % blk_per_grp;
+ if (!remainder)
+ bg_idx--;
+ }
+ ext4fs_reset_block_bmap(blknr, fs->blk_bmaps[bg_idx],
+ bg_idx);
+ debug("ActualB releasing %ld: %d\n", blknr, bg_idx);
+
+ bgd[bg_idx].free_blocks++;
+ fs->sb->free_blocks++;
+ /* journal backup */
+ if (prev_bg_bmap_idx != bg_idx) {
+ memset(journal_buffer, '\0', fs->blksz);
+ status = ext4fs_devread((lbaint_t)
+ bgd[bg_idx].block_id
+ * fs->sect_perblk,
+ 0, fs->blksz,
+ journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer,
+ bgd[bg_idx].block_id))
+ goto fail;
+ prev_bg_bmap_idx = bg_idx;
+ }
+ }
+ }
+
+ /* from the inode no to blockno */
+ inodes_per_block = fs->blksz / fs->inodesz;
+ ibmap_idx = inodeno / inode_per_grp;
+
+ /* get the block no */
+ inodeno--;
+ blkno = __le32_to_cpu(bgd[ibmap_idx].inode_table_id) +
+ (inodeno % __le32_to_cpu(inode_per_grp)) / inodes_per_block;
+
+ /* get the offset of the inode */
+ blkoff = ((inodeno) % inodes_per_block) * fs->inodesz;
+
+ /* read the block no containing the inode */
+ read_buffer = zalloc(fs->blksz);
+ if (!read_buffer)
+ goto fail;
+ start_block_address = read_buffer;
+ status = ext4fs_devread((lbaint_t)blkno * fs->sect_perblk,
+ 0, fs->blksz, read_buffer);
+ if (status == 0)
+ goto fail;
+
+ if (ext4fs_log_journal(read_buffer, blkno))
+ goto fail;
+
+ read_buffer = read_buffer + blkoff;
+ inode_buffer = (struct ext2_inode *)read_buffer;
+ memset(inode_buffer, '\0', sizeof(struct ext2_inode));
+
+ /* write the inode to original position in inode table */
+ if (ext4fs_put_metadata(start_block_address, blkno))
+ goto fail;
+
+ /* update the respective inode bitmaps */
+ inodeno++;
+ ext4fs_reset_inode_bmap(inodeno, fs->inode_bmaps[ibmap_idx], ibmap_idx);
+ bgd[ibmap_idx].free_inodes++;
+ fs->sb->free_inodes++;
+ /* journal backup */
+ memset(journal_buffer, '\0', fs->blksz);
+ status = ext4fs_devread((lbaint_t)bgd[ibmap_idx].inode_id *
+ fs->sect_perblk, 0, fs->blksz, journal_buffer);
+ if (status == 0)
+ goto fail;
+ if (ext4fs_log_journal(journal_buffer, bgd[ibmap_idx].inode_id))
+ goto fail;
+
+ ext4fs_update();
+ ext4fs_deinit();
+ ext4fs_reinit_global();
+
+ if (ext4fs_init() != 0) {
+ printf("error in File System init\n");
+ goto fail;
+ }
+
+ free(start_block_address);
+ free(journal_buffer);
+
+ return 0;
+fail:
+ free(start_block_address);
+ free(journal_buffer);
+
+ return -1;
+}
+
+int ext4fs_init(void)
+{
+ short status;
+ int i;
+ unsigned int real_free_blocks = 0;
+ struct ext_filesystem *fs = get_fs();
+
+ /* populate fs */
+ fs->blksz = EXT2_BLOCK_SIZE(ext4fs_root);
+ fs->inodesz = INODE_SIZE_FILESYSTEM(ext4fs_root);
+ fs->sect_perblk = fs->blksz >> fs->dev_desc->log2blksz;
+
+ /* get the superblock */
+ fs->sb = zalloc(SUPERBLOCK_SIZE);
+ if (!fs->sb)
+ return -ENOMEM;
+ if (!ext4_read_superblock((char *)fs->sb))
+ goto fail;
+
+ /* init journal */
+ if (ext4fs_init_journal())
+ goto fail;
+
+ /* get total no of blockgroups */
+ fs->no_blkgrp = (uint32_t)ext4fs_div_roundup(
+ (ext4fs_root->sblock.total_blocks -
+ ext4fs_root->sblock.first_data_block),
+ ext4fs_root->sblock.blocks_per_group);
+
+ /* get the block group descriptor table */
+ fs->gdtable_blkno = ((EXT2_MIN_BLOCK_SIZE == fs->blksz) + 1);
+ if (ext4fs_get_bgdtable() == -1) {
+ printf("Error in getting the block group descriptor table\n");
+ goto fail;
+ }
+ fs->bgd = (struct ext2_block_group *)fs->gdtable;
+
+ /* load all the available bitmap block of the partition */
+ fs->blk_bmaps = zalloc(fs->no_blkgrp * sizeof(char *));
+ if (!fs->blk_bmaps)
+ goto fail;
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ fs->blk_bmaps[i] = zalloc(fs->blksz);
+ if (!fs->blk_bmaps[i])
+ goto fail;
+ }
+
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ status =
+ ext4fs_devread((lbaint_t)fs->bgd[i].block_id *
+ fs->sect_perblk, 0,
+ fs->blksz, (char *)fs->blk_bmaps[i]);
+ if (status == 0)
+ goto fail;
+ }
+
+ /* load all the available inode bitmap of the partition */
+ fs->inode_bmaps = zalloc(fs->no_blkgrp * sizeof(unsigned char *));
+ if (!fs->inode_bmaps)
+ goto fail;
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ fs->inode_bmaps[i] = zalloc(fs->blksz);
+ if (!fs->inode_bmaps[i])
+ goto fail;
+ }
+
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ status = ext4fs_devread((lbaint_t)fs->bgd[i].inode_id *
+ fs->sect_perblk,
+ 0, fs->blksz,
+ (char *)fs->inode_bmaps[i]);
+ if (status == 0)
+ goto fail;
+ }
+
+	/*
+	 * Check filesystem consistency against the free block count:
+	 * after an unclean reboot of the Linux kernel the superblock
+	 * free-block count sometimes does not match the sum of the
+	 * block-group free-block counts, so trust the block groups.
+	 */
+ for (i = 0; i < fs->no_blkgrp; i++)
+ real_free_blocks = real_free_blocks + fs->bgd[i].free_blocks;
+ if (real_free_blocks != fs->sb->free_blocks)
+ fs->sb->free_blocks = real_free_blocks;
+
+ return 0;
+fail:
+ ext4fs_deinit();
+
+ return -1;
+}
+
+void ext4fs_deinit(void)
+{
+ int i;
+ struct ext2_inode inode_journal;
+ struct journal_superblock_t *jsb;
+ long int blknr;
+ struct ext_filesystem *fs = get_fs();
+
+ /* free journal */
+ char *temp_buff = zalloc(fs->blksz);
+ if (temp_buff) {
+ ext4fs_read_inode(ext4fs_root, EXT2_JOURNAL_INO,
+ &inode_journal);
+ blknr = read_allocated_block(&inode_journal,
+ EXT2_JOURNAL_SUPERBLOCK);
+ ext4fs_devread((lbaint_t)blknr * fs->sect_perblk, 0, fs->blksz,
+ temp_buff);
+ jsb = (struct journal_superblock_t *)temp_buff;
+ jsb->s_start = cpu_to_be32(0);
+ put_ext4((uint64_t) ((uint64_t)blknr * (uint64_t)fs->blksz),
+ (struct journal_superblock_t *)temp_buff, fs->blksz);
+ free(temp_buff);
+ }
+ ext4fs_free_journal();
+
+ /* get the superblock */
+ ext4_read_superblock((char *)fs->sb);
+ fs->sb->feature_incompat &= ~EXT3_FEATURE_INCOMPAT_RECOVER;
+ put_ext4((uint64_t)(SUPERBLOCK_SIZE),
+ (struct ext2_sblock *)fs->sb, (uint32_t)SUPERBLOCK_SIZE);
+ free(fs->sb);
+ fs->sb = NULL;
+
+ if (fs->blk_bmaps) {
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ free(fs->blk_bmaps[i]);
+ fs->blk_bmaps[i] = NULL;
+ }
+ free(fs->blk_bmaps);
+ fs->blk_bmaps = NULL;
+ }
+
+ if (fs->inode_bmaps) {
+ for (i = 0; i < fs->no_blkgrp; i++) {
+ free(fs->inode_bmaps[i]);
+ fs->inode_bmaps[i] = NULL;
+ }
+ free(fs->inode_bmaps);
+ fs->inode_bmaps = NULL;
+ }
+
+
+ free(fs->gdtable);
+ fs->gdtable = NULL;
+ fs->bgd = NULL;
+	/*
+	 * Reinitialize the global inode and block bitmap
+	 * first-execution check variables.
+	 */
+ fs->first_pass_ibmap = 0;
+ fs->first_pass_bbmap = 0;
+ fs->curr_inode_no = 0;
+ fs->curr_blkno = 0;
+}
+
+static int ext4fs_write_file(struct ext2_inode *file_inode,
+ int pos, unsigned int len, char *buf)
+{
+ int i;
+ int blockcnt;
+ unsigned int filesize = __le32_to_cpu(file_inode->size);
+ struct ext_filesystem *fs = get_fs();
+ int log2blksz = fs->dev_desc->log2blksz;
+ int log2_fs_blocksize = LOG2_BLOCK_SIZE(ext4fs_root) - log2blksz;
+ int previous_block_number = -1;
+ int delayed_start = 0;
+ int delayed_extent = 0;
+ int delayed_next = 0;
+ char *delayed_buf = NULL;
+
+	/* Adjust len so we can't read past the end of the file. */
+ if (len > filesize)
+ len = filesize;
+
+ blockcnt = ((len + pos) + fs->blksz - 1) / fs->blksz;
+
+ for (i = pos / fs->blksz; i < blockcnt; i++) {
+ long int blknr;
+ int blockend = fs->blksz;
+ int skipfirst = 0;
+ blknr = read_allocated_block(file_inode, i);
+ if (blknr < 0)
+ return -1;
+
+ blknr = blknr << log2_fs_blocksize;
+
+ if (blknr) {
+ if (previous_block_number != -1) {
+ if (delayed_next == blknr) {
+ delayed_extent += blockend;
+ delayed_next += blockend >> log2blksz;
+ } else { /* spill */
+ put_ext4((uint64_t)
+ ((uint64_t)delayed_start << log2blksz),
+ delayed_buf,
+ (uint32_t) delayed_extent);
+ previous_block_number = blknr;
+ delayed_start = blknr;
+ delayed_extent = blockend;
+ delayed_buf = buf;
+ delayed_next = blknr +
+ (blockend >> log2blksz);
+ }
+ } else {
+ previous_block_number = blknr;
+ delayed_start = blknr;
+ delayed_extent = blockend;
+ delayed_buf = buf;
+ delayed_next = blknr +
+ (blockend >> log2blksz);
+ }
+ } else {
+ if (previous_block_number != -1) {
+ /* spill */
+ put_ext4((uint64_t) ((uint64_t)delayed_start <<
+ log2blksz),
+ delayed_buf,
+ (uint32_t) delayed_extent);
+ previous_block_number = -1;
+ }
+ memset(buf, 0, fs->blksz - skipfirst);
+ }
+ buf += fs->blksz - skipfirst;
+ }
+ if (previous_block_number != -1) {
+ /* spill */
+ put_ext4((uint64_t) ((uint64_t)delayed_start << log2blksz),
+ delayed_buf, (uint32_t) delayed_extent);
+ previous_block_number = -1;
+ }
+
+ return len;
+}
+
+int ext4fs_write(const char *fname, unsigned char *buffer,
+ unsigned long sizebytes)
+{
+ int ret = 0;
+ struct ext2_inode *file_inode = NULL;
+ unsigned char *inode_buffer = NULL;
+ int parent_inodeno;
+ int inodeno;
+ time_t timestamp = 0;
+
+ uint64_t bytes_reqd_for_file;
+ unsigned int blks_reqd_for_file;
+ unsigned int blocks_remaining;
+ int existing_file_inodeno;
+ char *temp_ptr = NULL;
+ long int itable_blkno;
+ long int parent_itable_blkno;
+ long int blkoff;
+ struct ext2_sblock *sblock = &(ext4fs_root->sblock);
+ unsigned int inodes_per_block;
+ unsigned int ibmap_idx;
+ struct ext_filesystem *fs = get_fs();
+ ALLOC_CACHE_ALIGN_BUFFER(char, filename, 256);
+	memset(filename, 0x00, 256);
+
+ g_parent_inode = zalloc(sizeof(struct ext2_inode));
+ if (!g_parent_inode)
+ goto fail;
+
+ if (ext4fs_init() != 0) {
+ printf("error in File System init\n");
+ return -1;
+ }
+ inodes_per_block = fs->blksz / fs->inodesz;
+ parent_inodeno = ext4fs_get_parent_inode_num(fname, filename, F_FILE);
+ if (parent_inodeno == -1)
+ goto fail;
+ if (ext4fs_iget(parent_inodeno, g_parent_inode))
+ goto fail;
+ /* check if the filename is already present in root */
+ existing_file_inodeno = ext4fs_filename_check(filename);
+ if (existing_file_inodeno != -1) {
+ ret = ext4fs_delete_file(existing_file_inodeno);
+ fs->first_pass_bbmap = 0;
+ fs->curr_blkno = 0;
+
+ fs->first_pass_ibmap = 0;
+ fs->curr_inode_no = 0;
+ if (ret)
+ goto fail;
+ }
+	/* calculate how many blocks are required */
+ bytes_reqd_for_file = sizebytes;
+ blks_reqd_for_file = lldiv(bytes_reqd_for_file, fs->blksz);
+ if (do_div(bytes_reqd_for_file, fs->blksz) != 0) {
+ blks_reqd_for_file++;
+		debug("total blocks for the file: %u\n", blks_reqd_for_file);
+ }
+ blocks_remaining = blks_reqd_for_file;
+ /* test for available space in partition */
+ if (fs->sb->free_blocks < blks_reqd_for_file) {
+ printf("Not enough space on partition !!!\n");
+ goto fail;
+ }
+
+ ext4fs_update_parent_dentry(filename, &inodeno, FILETYPE_REG);
+ /* prepare file inode */
+ inode_buffer = zalloc(fs->inodesz);
+ if (!inode_buffer)
+ goto fail;
+ file_inode = (struct ext2_inode *)inode_buffer;
+ file_inode->mode = S_IFREG | S_IRWXU |
+ S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH;
+ /* ToDo: Update correct time */
+ file_inode->mtime = timestamp;
+ file_inode->atime = timestamp;
+ file_inode->ctime = timestamp;
+ file_inode->nlinks = 1;
+ file_inode->size = sizebytes;
+
+ /* Allocate data blocks */
+ ext4fs_allocate_blocks(file_inode, blocks_remaining,
+ &blks_reqd_for_file);
+ file_inode->blockcnt = (blks_reqd_for_file * fs->blksz) >>
+ fs->dev_desc->log2blksz;
+
+ temp_ptr = zalloc(fs->blksz);
+ if (!temp_ptr)
+ goto fail;
+ ibmap_idx = inodeno / ext4fs_root->sblock.inodes_per_group;
+ inodeno--;
+ itable_blkno = __le32_to_cpu(fs->bgd[ibmap_idx].inode_table_id) +
+ (inodeno % __le32_to_cpu(sblock->inodes_per_group)) /
+ inodes_per_block;
+ blkoff = (inodeno % inodes_per_block) * fs->inodesz;
+ ext4fs_devread((lbaint_t)itable_blkno * fs->sect_perblk, 0, fs->blksz,
+ temp_ptr);
+ if (ext4fs_log_journal(temp_ptr, itable_blkno))
+ goto fail;
+
+ memcpy(temp_ptr + blkoff, inode_buffer, fs->inodesz);
+ if (ext4fs_put_metadata(temp_ptr, itable_blkno))
+ goto fail;
+ /* copy the file content into data blocks */
+ if (ext4fs_write_file(file_inode, 0, sizebytes, (char *)buffer) == -1) {
+ printf("Error in copying content\n");
+ goto fail;
+ }
+ ibmap_idx = parent_inodeno / ext4fs_root->sblock.inodes_per_group;
+ parent_inodeno--;
+ parent_itable_blkno = __le32_to_cpu(fs->bgd[ibmap_idx].inode_table_id) +
+ (parent_inodeno %
+ __le32_to_cpu(sblock->inodes_per_group)) / inodes_per_block;
+ blkoff = (parent_inodeno % inodes_per_block) * fs->inodesz;
+ if (parent_itable_blkno != itable_blkno) {
+ memset(temp_ptr, '\0', fs->blksz);
+ ext4fs_devread((lbaint_t)parent_itable_blkno * fs->sect_perblk,
+ 0, fs->blksz, temp_ptr);
+ if (ext4fs_log_journal(temp_ptr, parent_itable_blkno))
+ goto fail;
+
+ memcpy(temp_ptr + blkoff, g_parent_inode,
+ sizeof(struct ext2_inode));
+ if (ext4fs_put_metadata(temp_ptr, parent_itable_blkno))
+ goto fail;
+ free(temp_ptr);
+ } else {
+ /*
+ * If parent and child fall in same inode table block
+ * both should be kept in 1 buffer
+ */
+ memcpy(temp_ptr + blkoff, g_parent_inode,
+ sizeof(struct ext2_inode));
+ gd_index--;
+ if (ext4fs_put_metadata(temp_ptr, itable_blkno))
+ goto fail;
+ free(temp_ptr);
+ }
+ ext4fs_update();
+ ext4fs_deinit();
+
+ fs->first_pass_bbmap = 0;
+ fs->curr_blkno = 0;
+ fs->first_pass_ibmap = 0;
+ fs->curr_inode_no = 0;
+ free(inode_buffer);
+ free(g_parent_inode);
+ g_parent_inode = NULL;
+
+ return 0;
+fail:
+ ext4fs_deinit();
+ free(inode_buffer);
+ free(g_parent_inode);
+ g_parent_inode = NULL;
+
+ return -1;
+}
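
Editor's note: for orientation, a hedged sketch of how a caller might use ext4fs_write() once the block device and partition have been selected and the filesystem mounted (as ext4fs_probe() in ext4fs.c below does). The file name and the example_store_image wrapper are made up for illustration:

/* Illustrative only: store a RAM buffer as /boot.img on an ext4
 * partition that has already been probed and mounted. */
static int example_store_image(unsigned char *img, unsigned long len)
{
	if (ext4fs_write("/boot.img", img, len) != 0) {
		printf("ext4 write of /boot.img failed\n");
		return -1;
	}

	return 0;
}
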
diff --git a/qemu/roms/u-boot/fs/ext4/ext4fs.c b/qemu/roms/u-boot/fs/ext4/ext4fs.c
new file mode 100644
index 000000000..417ce7b63
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ext4/ext4fs.c
@@ -0,0 +1,228 @@
+/*
+ * (C) Copyright 2011 - 2012 Samsung Electronics
+ * EXT4 filesystem implementation in Uboot by
+ * Uma Shankar <uma.shankar@samsung.com>
+ * Manjunatha C Achar <a.manjunatha@samsung.com>
+ *
+ * ext4ls and ext4load : Based on ext2 ls and load support in Uboot.
+ * Ext4 read optimization taken from Open-Moko
+ * Qi bootloader
+ *
+ * (C) Copyright 2004
+ * esd gmbh <www.esd-electronics.com>
+ * Reinhard Arlt <reinhard.arlt@esd-electronics.com>
+ *
+ * based on code from grub2 fs/ext2.c and fs/fshelp.c by
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+ *
+ * ext4write : Based on generic ext4 protocol.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <ext_common.h>
+#include <ext4fs.h>
+#include "ext4_common.h"
+
+int ext4fs_symlinknest;
+struct ext_filesystem ext_fs;
+
+struct ext_filesystem *get_fs(void)
+{
+ return &ext_fs;
+}
+
+void ext4fs_free_node(struct ext2fs_node *node, struct ext2fs_node *currroot)
+{
+ if ((node != &ext4fs_root->diropen) && (node != currroot))
+ free(node);
+}
+
+/*
+ * Taken from openmoko-kernel mailing list: By Andy green
+ * Optimized read file API : collects and defers contiguous sector
+ * reads into one potentially more efficient larger sequential read action
+ */
+int ext4fs_read_file(struct ext2fs_node *node, int pos,
+ unsigned int len, char *buf)
+{
+ struct ext_filesystem *fs = get_fs();
+ int i;
+ lbaint_t blockcnt;
+ int log2blksz = fs->dev_desc->log2blksz;
+ int log2_fs_blocksize = LOG2_BLOCK_SIZE(node->data) - log2blksz;
+ int blocksize = (1 << (log2_fs_blocksize + log2blksz));
+ unsigned int filesize = __le32_to_cpu(node->inode.size);
+ lbaint_t previous_block_number = -1;
+ lbaint_t delayed_start = 0;
+ lbaint_t delayed_extent = 0;
+ lbaint_t delayed_skipfirst = 0;
+ lbaint_t delayed_next = 0;
+ char *delayed_buf = NULL;
+ short status;
+
+	/* Adjust len so we can't read past the end of the file. */
+ if (len > filesize)
+ len = filesize;
+
+ blockcnt = ((len + pos) + blocksize - 1) / blocksize;
+
+ for (i = pos / blocksize; i < blockcnt; i++) {
+ lbaint_t blknr;
+ int blockoff = pos % blocksize;
+ int blockend = blocksize;
+ int skipfirst = 0;
+ blknr = read_allocated_block(&(node->inode), i);
+ if (blknr < 0)
+ return -1;
+
+ blknr = blknr << log2_fs_blocksize;
+
+ /* Last block. */
+ if (i == blockcnt - 1) {
+ blockend = (len + pos) % blocksize;
+
+ /* The last portion is exactly blocksize. */
+ if (!blockend)
+ blockend = blocksize;
+ }
+
+ /* First block. */
+ if (i == pos / blocksize) {
+ skipfirst = blockoff;
+ blockend -= skipfirst;
+ }
+ if (blknr) {
+ int status;
+
+ if (previous_block_number != -1) {
+ if (delayed_next == blknr) {
+ delayed_extent += blockend;
+ delayed_next += blockend >> log2blksz;
+ } else { /* spill */
+ status = ext4fs_devread(delayed_start,
+ delayed_skipfirst,
+ delayed_extent,
+ delayed_buf);
+ if (status == 0)
+ return -1;
+ previous_block_number = blknr;
+ delayed_start = blknr;
+ delayed_extent = blockend;
+ delayed_skipfirst = skipfirst;
+ delayed_buf = buf;
+ delayed_next = blknr +
+ (blockend >> log2blksz);
+ }
+ } else {
+ previous_block_number = blknr;
+ delayed_start = blknr;
+ delayed_extent = blockend;
+ delayed_skipfirst = skipfirst;
+ delayed_buf = buf;
+ delayed_next = blknr +
+ (blockend >> log2blksz);
+ }
+ } else {
+ if (previous_block_number != -1) {
+ /* spill */
+ status = ext4fs_devread(delayed_start,
+ delayed_skipfirst,
+ delayed_extent,
+ delayed_buf);
+ if (status == 0)
+ return -1;
+ previous_block_number = -1;
+ }
+ memset(buf, 0, blocksize - skipfirst);
+ }
+ buf += blocksize - skipfirst;
+ }
+ if (previous_block_number != -1) {
+ /* spill */
+ status = ext4fs_devread(delayed_start,
+ delayed_skipfirst, delayed_extent,
+ delayed_buf);
+ if (status == 0)
+ return -1;
+ previous_block_number = -1;
+ }
+
+ return len;
+}
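
Editor's note: the deferred-read optimization described in the comment above ext4fs_read_file() is easy to miss inside the block loop, so here is the pattern on its own. This is a sketch under stated assumptions, not code from the patch: map_block() and dev_read() are hypothetical stand-ins for read_allocated_block() and ext4fs_devread(), and dev_read() returns non-zero on success, like ext4fs_devread().

/* Sketch of the deferral idea in isolation: adjacent logical blocks
 * that map to consecutive device blocks are merged into one run and
 * read with a single call instead of one call per block. */
static int read_coalesced(int nblocks, char *buf, int blksz,
			  long (*map_block)(int),
			  int (*dev_read)(long start, int cnt, char *dst))
{
	long run_start = -1, next_expected = -1;
	char *run_dst = NULL;
	int run_cnt = 0, i;

	for (i = 0; i < nblocks; i++, buf += blksz) {
		long blk = map_block(i);

		if (run_cnt && blk == next_expected) {
			run_cnt++;			/* run keeps growing */
		} else {
			if (run_cnt && !dev_read(run_start, run_cnt, run_dst))
				return -1;		/* spill previous run */
			run_start = blk;		/* start a new run */
			run_dst = buf;
			run_cnt = 1;
		}
		next_expected = blk + 1;
	}
	if (run_cnt && !dev_read(run_start, run_cnt, run_dst))
		return -1;				/* spill final run */

	return 0;
}
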
+
+int ext4fs_ls(const char *dirname)
+{
+ struct ext2fs_node *dirnode;
+ int status;
+
+ if (dirname == NULL)
+ return 0;
+
+ status = ext4fs_find_file(dirname, &ext4fs_root->diropen, &dirnode,
+ FILETYPE_DIRECTORY);
+ if (status != 1) {
+ printf("** Can not find directory. **\n");
+ return 1;
+ }
+
+ ext4fs_iterate_dir(dirnode, NULL, NULL, NULL);
+ ext4fs_free_node(dirnode, &ext4fs_root->diropen);
+
+ return 0;
+}
+
+int ext4fs_exists(const char *filename)
+{
+ int file_len;
+
+ file_len = ext4fs_open(filename);
+ return file_len >= 0;
+}
+
+int ext4fs_read(char *buf, unsigned len)
+{
+ if (ext4fs_root == NULL || ext4fs_file == NULL)
+ return 0;
+
+ return ext4fs_read_file(ext4fs_file, 0, len, buf);
+}
+
+int ext4fs_probe(block_dev_desc_t *fs_dev_desc,
+ disk_partition_t *fs_partition)
+{
+ ext4fs_set_blk_dev(fs_dev_desc, fs_partition);
+
+ if (!ext4fs_mount(fs_partition->size)) {
+ ext4fs_close();
+ return -1;
+ }
+
+ return 0;
+}
+
+int ext4_read_file(const char *filename, void *buf, int offset, int len)
+{
+ int file_len;
+ int len_read;
+
+ if (offset != 0) {
+ printf("** Cannot support non-zero offset **\n");
+ return -1;
+ }
+
+ file_len = ext4fs_open(filename);
+ if (file_len < 0) {
+ printf("** File not found %s **\n", filename);
+ return -1;
+ }
+
+ if (len == 0)
+ len = file_len;
+
+ len_read = ext4fs_read(buf, len);
+
+ return len_read;
+}
diff --git a/qemu/roms/u-boot/fs/fat/Makefile b/qemu/roms/u-boot/fs/fat/Makefile
new file mode 100644
index 000000000..b60e8486c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/fat/Makefile
@@ -0,0 +1,11 @@
+#
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-$(CONFIG_FS_FAT) := fat.o
+obj-$(CONFIG_FAT_WRITE):= fat_write.o
+
+ifndef CONFIG_SPL_BUILD
+obj-$(CONFIG_FS_FAT) += file.o
+endif
diff --git a/qemu/roms/u-boot/fs/fat/fat.c b/qemu/roms/u-boot/fs/fat/fat.c
new file mode 100644
index 000000000..54f42eae0
--- /dev/null
+++ b/qemu/roms/u-boot/fs/fat/fat.c
@@ -0,0 +1,1273 @@
+/*
+ * fat.c
+ *
+ * R/O (V)FAT 12/16/32 filesystem implementation by Marcus Sundberg
+ *
+ * 2002-07-28 - rjones@nexus-tech.net - ported to ppcboot v1.1.6
+ * 2003-03-10 - kharris@nexus-tech.net - ported to uboot
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <config.h>
+#include <exports.h>
+#include <fat.h>
+#include <asm/byteorder.h>
+#include <part.h>
+#include <malloc.h>
+#include <linux/compiler.h>
+#include <linux/ctype.h>
+
+#ifdef CONFIG_SUPPORT_VFAT
+static const int vfat_enabled = 1;
+#else
+static const int vfat_enabled = 0;
+#endif
+
+/*
+ * Convert a string to lowercase.
+ */
+static void downcase(char *str)
+{
+ while (*str != '\0') {
+ *str = tolower(*str);
+ str++;
+ }
+}
+
+static block_dev_desc_t *cur_dev;
+static disk_partition_t cur_part_info;
+
+#define DOS_BOOT_MAGIC_OFFSET 0x1fe
+#define DOS_FS_TYPE_OFFSET 0x36
+#define DOS_FS32_TYPE_OFFSET 0x52
+
+static int disk_read(__u32 block, __u32 nr_blocks, void *buf)
+{
+ if (!cur_dev || !cur_dev->block_read)
+ return -1;
+
+ return cur_dev->block_read(cur_dev->dev,
+ cur_part_info.start + block, nr_blocks, buf);
+}
+
+int fat_set_blk_dev(block_dev_desc_t *dev_desc, disk_partition_t *info)
+{
+ ALLOC_CACHE_ALIGN_BUFFER(unsigned char, buffer, dev_desc->blksz);
+
+ cur_dev = dev_desc;
+ cur_part_info = *info;
+
+ /* Make sure it has a valid FAT header */
+ if (disk_read(0, 1, buffer) != 1) {
+ cur_dev = NULL;
+ return -1;
+ }
+
+ /* Check if it's actually a DOS volume */
+ if (memcmp(buffer + DOS_BOOT_MAGIC_OFFSET, "\x55\xAA", 2)) {
+ cur_dev = NULL;
+ return -1;
+ }
+
+ /* Check for FAT12/FAT16/FAT32 filesystem */
+ if (!memcmp(buffer + DOS_FS_TYPE_OFFSET, "FAT", 3))
+ return 0;
+ if (!memcmp(buffer + DOS_FS32_TYPE_OFFSET, "FAT32", 5))
+ return 0;
+
+ cur_dev = NULL;
+ return -1;
+}
+
+int fat_register_device(block_dev_desc_t *dev_desc, int part_no)
+{
+ disk_partition_t info;
+
+ /* First close any currently found FAT filesystem */
+ cur_dev = NULL;
+
+ /* Read the partition table, if present */
+ if (get_partition_info(dev_desc, part_no, &info)) {
+ if (part_no != 0) {
+ printf("** Partition %d not valid on device %d **\n",
+ part_no, dev_desc->dev);
+ return -1;
+ }
+
+ info.start = 0;
+ info.size = dev_desc->lba;
+ info.blksz = dev_desc->blksz;
+ info.name[0] = 0;
+ info.type[0] = 0;
+ info.bootable = 0;
+#ifdef CONFIG_PARTITION_UUIDS
+ info.uuid[0] = 0;
+#endif
+ }
+
+ return fat_set_blk_dev(dev_desc, &info);
+}
+
+/*
+ * Get the first occurrence of a directory delimiter ('/' or '\') in a string.
+ * Return index into string if found, -1 otherwise.
+ */
+static int dirdelim(char *str)
+{
+ char *start = str;
+
+ while (*str != '\0') {
+ if (ISDIRDELIM(*str))
+ return str - start;
+ str++;
+ }
+ return -1;
+}
+
+/*
+ * Extract zero terminated short name from a directory entry.
+ */
+static void get_name(dir_entry *dirent, char *s_name)
+{
+ char *ptr;
+
+ memcpy(s_name, dirent->name, 8);
+ s_name[8] = '\0';
+ ptr = s_name;
+ while (*ptr && *ptr != ' ')
+ ptr++;
+ if (dirent->ext[0] && dirent->ext[0] != ' ') {
+ *ptr = '.';
+ ptr++;
+ memcpy(ptr, dirent->ext, 3);
+ ptr[3] = '\0';
+ while (*ptr && *ptr != ' ')
+ ptr++;
+ }
+ *ptr = '\0';
+ if (*s_name == DELETED_FLAG)
+ *s_name = '\0';
+ else if (*s_name == aRING)
+ *s_name = DELETED_FLAG;
+ downcase(s_name);
+}
+
+/*
+ * Get the entry at index 'entry' in a FAT (12/16/32) table.
+ * On failure 0x00 is returned.
+ */
+static __u32 get_fatent(fsdata *mydata, __u32 entry)
+{
+ __u32 bufnum;
+ __u32 off16, offset;
+ __u32 ret = 0x00;
+ __u16 val1, val2;
+
+ switch (mydata->fatsize) {
+ case 32:
+ bufnum = entry / FAT32BUFSIZE;
+ offset = entry - bufnum * FAT32BUFSIZE;
+ break;
+ case 16:
+ bufnum = entry / FAT16BUFSIZE;
+ offset = entry - bufnum * FAT16BUFSIZE;
+ break;
+ case 12:
+ bufnum = entry / FAT12BUFSIZE;
+ offset = entry - bufnum * FAT12BUFSIZE;
+ break;
+
+ default:
+ /* Unsupported FAT size */
+ return ret;
+ }
+
+ debug("FAT%d: entry: 0x%04x = %d, offset: 0x%04x = %d\n",
+ mydata->fatsize, entry, entry, offset, offset);
+
+ /* Read a new block of FAT entries into the cache. */
+ if (bufnum != mydata->fatbufnum) {
+ __u32 getsize = FATBUFBLOCKS;
+ __u8 *bufptr = mydata->fatbuf;
+ __u32 fatlength = mydata->fatlength;
+ __u32 startblock = bufnum * FATBUFBLOCKS;
+
+ if (startblock + getsize > fatlength)
+ getsize = fatlength - startblock;
+
+ startblock += mydata->fat_sect; /* Offset from start of disk */
+
+ if (disk_read(startblock, getsize, bufptr) < 0) {
+ debug("Error reading FAT blocks\n");
+ return ret;
+ }
+ mydata->fatbufnum = bufnum;
+ }
+
+ /* Get the actual entry from the table */
+ switch (mydata->fatsize) {
+ case 32:
+ ret = FAT2CPU32(((__u32 *) mydata->fatbuf)[offset]);
+ break;
+ case 16:
+ ret = FAT2CPU16(((__u16 *) mydata->fatbuf)[offset]);
+ break;
+ case 12:
+ off16 = (offset * 3) / 4;
+
+ switch (offset & 0x3) {
+ case 0:
+ ret = FAT2CPU16(((__u16 *) mydata->fatbuf)[off16]);
+ ret &= 0xfff;
+ break;
+ case 1:
+ val1 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16]);
+ val1 &= 0xf000;
+ val2 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16 + 1]);
+ val2 &= 0x00ff;
+ ret = (val2 << 4) | (val1 >> 12);
+ break;
+ case 2:
+ val1 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16]);
+ val1 &= 0xff00;
+ val2 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16 + 1]);
+ val2 &= 0x000f;
+ ret = (val2 << 8) | (val1 >> 8);
+ break;
+ case 3:
+ ret = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16]);
+ ret = (ret & 0xfff0) >> 4;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ debug("FAT%d: ret: %08x, offset: %04x\n",
+ mydata->fatsize, ret, offset);
+
+ return ret;
+}
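
Editor's note: the FAT12 case in get_fatent() is the fiddly one; entries are 12 bits wide, so two entries share three bytes and the code reconstructs them from 16-bit words. The same packing can be expressed byte-wise. A small sketch for illustration only; fat12_entry is a hypothetical helper and fat points at the raw FAT:

/* Illustrative only: pull 12-bit entry 'n' out of a raw FAT12 table.
 * Entry n starts at byte offset n * 1.5; even entries occupy the low
 * 12 bits of the little-endian 16-bit word there, odd entries the
 * high 12 bits.  E.g. entries 0x0ABC and 0x0123 are stored as the
 * byte sequence BC 3A 12. */
static unsigned int fat12_entry(const unsigned char *fat, unsigned int n)
{
	unsigned int off = n + n / 2;
	unsigned int word = fat[off] | (fat[off + 1] << 8);

	return (n & 1) ? (word >> 4) : (word & 0xfff);
}
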
+
+/*
+ * Read at most 'size' bytes from the specified cluster into 'buffer'.
+ * Return 0 on success, -1 otherwise.
+ */
+static int
+get_cluster(fsdata *mydata, __u32 clustnum, __u8 *buffer, unsigned long size)
+{
+ __u32 idx = 0;
+ __u32 startsect;
+ int ret;
+
+ if (clustnum > 0) {
+ startsect = mydata->data_begin +
+ clustnum * mydata->clust_size;
+ } else {
+ startsect = mydata->rootdir_sect;
+ }
+
+ debug("gc - clustnum: %d, startsect: %d\n", clustnum, startsect);
+
+ if ((unsigned long)buffer & (ARCH_DMA_MINALIGN - 1)) {
+ ALLOC_CACHE_ALIGN_BUFFER(__u8, tmpbuf, mydata->sect_size);
+
+ printf("FAT: Misaligned buffer address (%p)\n", buffer);
+
+ while (size >= mydata->sect_size) {
+ ret = disk_read(startsect++, 1, tmpbuf);
+ if (ret != 1) {
+ debug("Error reading data (got %d)\n", ret);
+ return -1;
+ }
+
+ memcpy(buffer, tmpbuf, mydata->sect_size);
+ buffer += mydata->sect_size;
+ size -= mydata->sect_size;
+ }
+ } else {
+ idx = size / mydata->sect_size;
+ ret = disk_read(startsect, idx, buffer);
+ if (ret != idx) {
+ debug("Error reading data (got %d)\n", ret);
+ return -1;
+ }
+ startsect += idx;
+ idx *= mydata->sect_size;
+ buffer += idx;
+ size -= idx;
+ }
+ if (size) {
+ ALLOC_CACHE_ALIGN_BUFFER(__u8, tmpbuf, mydata->sect_size);
+
+ ret = disk_read(startsect, 1, tmpbuf);
+ if (ret != 1) {
+ debug("Error reading data (got %d)\n", ret);
+ return -1;
+ }
+
+ memcpy(buffer, tmpbuf, size);
+ }
+
+ return 0;
+}
+
+/*
+ * Read at most 'maxsize' bytes from 'pos' in the file associated with 'dentptr'
+ * into 'buffer'.
+ * Return the number of bytes read or -1 on fatal errors.
+ */
+__u8 get_contents_vfatname_block[MAX_CLUSTSIZE]
+ __aligned(ARCH_DMA_MINALIGN);
+
+static long
+get_contents(fsdata *mydata, dir_entry *dentptr, unsigned long pos,
+ __u8 *buffer, unsigned long maxsize)
+{
+ unsigned long filesize = FAT2CPU32(dentptr->size), gotsize = 0;
+ unsigned int bytesperclust = mydata->clust_size * mydata->sect_size;
+ __u32 curclust = START(dentptr);
+ __u32 endclust, newclust;
+ unsigned long actsize;
+
+ debug("Filesize: %ld bytes\n", filesize);
+
+ if (pos >= filesize) {
+ debug("Read position past EOF: %lu\n", pos);
+ return gotsize;
+ }
+
+ if (maxsize > 0 && filesize > pos + maxsize)
+ filesize = pos + maxsize;
+
+ debug("%ld bytes\n", filesize);
+
+ actsize = bytesperclust;
+
+ /* go to cluster at pos */
+ while (actsize <= pos) {
+ curclust = get_fatent(mydata, curclust);
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ debug("Invalid FAT entry\n");
+ return gotsize;
+ }
+ actsize += bytesperclust;
+ }
+
+ /* actsize > pos */
+ actsize -= bytesperclust;
+ filesize -= actsize;
+ pos -= actsize;
+
+ /* align to beginning of next cluster if any */
+ if (pos) {
+ actsize = min(filesize, bytesperclust);
+ if (get_cluster(mydata, curclust, get_contents_vfatname_block,
+ (int)actsize) != 0) {
+ printf("Error reading cluster\n");
+ return -1;
+ }
+ filesize -= actsize;
+ actsize -= pos;
+ memcpy(buffer, get_contents_vfatname_block + pos, actsize);
+ gotsize += actsize;
+ if (!filesize)
+ return gotsize;
+ buffer += actsize;
+
+ curclust = get_fatent(mydata, curclust);
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ debug("Invalid FAT entry\n");
+ return gotsize;
+ }
+ }
+
+ actsize = bytesperclust;
+ endclust = curclust;
+
+ do {
+ /* search for consecutive clusters */
+ while (actsize < filesize) {
+ newclust = get_fatent(mydata, endclust);
+ if ((newclust - 1) != endclust)
+ goto getit;
+ if (CHECK_CLUST(newclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", newclust);
+ debug("Invalid FAT entry\n");
+ return gotsize;
+ }
+ endclust = newclust;
+ actsize += bytesperclust;
+ }
+
+ /* get remaining bytes */
+ actsize = filesize;
+ if (get_cluster(mydata, curclust, buffer, (int)actsize) != 0) {
+ printf("Error reading cluster\n");
+ return -1;
+ }
+ gotsize += actsize;
+ return gotsize;
+getit:
+ if (get_cluster(mydata, curclust, buffer, (int)actsize) != 0) {
+ printf("Error reading cluster\n");
+ return -1;
+ }
+ gotsize += (int)actsize;
+ filesize -= actsize;
+ buffer += actsize;
+
+ curclust = get_fatent(mydata, endclust);
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ printf("Invalid FAT entry\n");
+ return gotsize;
+ }
+ actsize = bytesperclust;
+ endclust = curclust;
+ } while (1);
+}
+
+/*
+ * Extract the file name information from 'slotptr' into 'l_name',
+ * starting at l_name[*idx].
+ * Return 1 if terminator (zero byte) is found, 0 otherwise.
+ */
+static int slot2str(dir_slot *slotptr, char *l_name, int *idx)
+{
+ int j;
+
+ for (j = 0; j <= 8; j += 2) {
+ l_name[*idx] = slotptr->name0_4[j];
+ if (l_name[*idx] == 0x00)
+ return 1;
+ (*idx)++;
+ }
+ for (j = 0; j <= 10; j += 2) {
+ l_name[*idx] = slotptr->name5_10[j];
+ if (l_name[*idx] == 0x00)
+ return 1;
+ (*idx)++;
+ }
+ for (j = 0; j <= 2; j += 2) {
+ l_name[*idx] = slotptr->name11_12[j];
+ if (l_name[*idx] == 0x00)
+ return 1;
+ (*idx)++;
+ }
+
+ return 0;
+}
+
+/*
+ * Extract the full long filename starting at 'retdent' (which is really
+ * a slot) into 'l_name'. If successful also copy the real directory entry
+ * into 'retdent'
+ * Return 0 on success, -1 otherwise.
+ */
+static int
+get_vfatname(fsdata *mydata, int curclust, __u8 *cluster,
+ dir_entry *retdent, char *l_name)
+{
+ dir_entry *realdent;
+ dir_slot *slotptr = (dir_slot *)retdent;
+ __u8 *buflimit = cluster + mydata->sect_size * ((curclust == 0) ?
+ PREFETCH_BLOCKS :
+ mydata->clust_size);
+ __u8 counter = (slotptr->id & ~LAST_LONG_ENTRY_MASK) & 0xff;
+ int idx = 0;
+
+ if (counter > VFAT_MAXSEQ) {
+ debug("Error: VFAT name is too long\n");
+ return -1;
+ }
+
+ while ((__u8 *)slotptr < buflimit) {
+ if (counter == 0)
+ break;
+ if (((slotptr->id & ~LAST_LONG_ENTRY_MASK) & 0xff) != counter)
+ return -1;
+ slotptr++;
+ counter--;
+ }
+
+ if ((__u8 *)slotptr >= buflimit) {
+ dir_slot *slotptr2;
+
+ if (curclust == 0)
+ return -1;
+ curclust = get_fatent(mydata, curclust);
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ printf("Invalid FAT entry\n");
+ return -1;
+ }
+
+ if (get_cluster(mydata, curclust, get_contents_vfatname_block,
+ mydata->clust_size * mydata->sect_size) != 0) {
+ debug("Error: reading directory block\n");
+ return -1;
+ }
+
+ slotptr2 = (dir_slot *)get_contents_vfatname_block;
+ while (counter > 0) {
+ if (((slotptr2->id & ~LAST_LONG_ENTRY_MASK)
+ & 0xff) != counter)
+ return -1;
+ slotptr2++;
+ counter--;
+ }
+
+ /* Save the real directory entry */
+ realdent = (dir_entry *)slotptr2;
+ while ((__u8 *)slotptr2 > get_contents_vfatname_block) {
+ slotptr2--;
+ slot2str(slotptr2, l_name, &idx);
+ }
+ } else {
+ /* Save the real directory entry */
+ realdent = (dir_entry *)slotptr;
+ }
+
+ do {
+ slotptr--;
+ if (slot2str(slotptr, l_name, &idx))
+ break;
+ } while (!(slotptr->id & LAST_LONG_ENTRY_MASK));
+
+ l_name[idx] = '\0';
+ if (*l_name == DELETED_FLAG)
+ *l_name = '\0';
+ else if (*l_name == aRING)
+ *l_name = DELETED_FLAG;
+ downcase(l_name);
+
+ /* Return the real directory entry */
+ memcpy(retdent, realdent, sizeof(dir_entry));
+
+ return 0;
+}
+
+/* Calculate short name checksum */
+static __u8 mkcksum(const char name[8], const char ext[3])
+{
+ int i;
+
+ __u8 ret = 0;
+
+ for (i = 0; i < 8; i++)
+ ret = (((ret & 1) << 7) | ((ret & 0xfe) >> 1)) + name[i];
+ for (i = 0; i < 3; i++)
+ ret = (((ret & 1) << 7) | ((ret & 0xfe) >> 1)) + ext[i];
+
+ return ret;
+}
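
Editor's note: mkcksum() implements the standard VFAT alias checksum, rotating the running sum right by one bit and adding the next character of the 11-byte 8.3 name; the long-name slots carry the same value in alias_checksum, which is what the csum == prevcksum comparisons further down verify. A compact equivalent over a single buffer, for illustration only (lfn_checksum is a hypothetical name):

/* Illustrative only: the same rotate-right-and-add checksum that
 * mkcksum() computes, taken over one 11-byte short-name buffer. */
static unsigned char lfn_checksum(const unsigned char short_name[11])
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < 11; i++)
		sum = ((sum & 1) << 7) + (sum >> 1) + short_name[i];

	return sum;
}
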
+
+/*
+ * Get the directory entry associated with 'filename' from the directory
+ * starting at 'startsect'
+ */
+__u8 get_dentfromdir_block[MAX_CLUSTSIZE]
+ __aligned(ARCH_DMA_MINALIGN);
+
+static dir_entry *get_dentfromdir(fsdata *mydata, int startsect,
+ char *filename, dir_entry *retdent,
+ int dols)
+{
+ __u16 prevcksum = 0xffff;
+ __u32 curclust = START(retdent);
+ int files = 0, dirs = 0;
+
+ debug("get_dentfromdir: %s\n", filename);
+
+ while (1) {
+ dir_entry *dentptr;
+
+ int i;
+
+ if (get_cluster(mydata, curclust, get_dentfromdir_block,
+ mydata->clust_size * mydata->sect_size) != 0) {
+ debug("Error: reading directory block\n");
+ return NULL;
+ }
+
+ dentptr = (dir_entry *)get_dentfromdir_block;
+
+ for (i = 0; i < DIRENTSPERCLUST; i++) {
+ char s_name[14], l_name[VFAT_MAXLEN_BYTES];
+
+ l_name[0] = '\0';
+ if (dentptr->name[0] == DELETED_FLAG) {
+ dentptr++;
+ continue;
+ }
+ if ((dentptr->attr & ATTR_VOLUME)) {
+ if (vfat_enabled &&
+ (dentptr->attr & ATTR_VFAT) == ATTR_VFAT &&
+ (dentptr->name[0] & LAST_LONG_ENTRY_MASK)) {
+ prevcksum = ((dir_slot *)dentptr)->alias_checksum;
+ get_vfatname(mydata, curclust,
+ get_dentfromdir_block,
+ dentptr, l_name);
+ if (dols) {
+ int isdir;
+ char dirc;
+ int doit = 0;
+
+ isdir = (dentptr->attr & ATTR_DIR);
+
+ if (isdir) {
+ dirs++;
+ dirc = '/';
+ doit = 1;
+ } else {
+ dirc = ' ';
+ if (l_name[0] != 0) {
+ files++;
+ doit = 1;
+ }
+ }
+ if (doit) {
+ if (dirc == ' ') {
+ printf(" %8ld %s%c\n",
+ (long)FAT2CPU32(dentptr->size),
+ l_name,
+ dirc);
+ } else {
+ printf(" %s%c\n",
+ l_name,
+ dirc);
+ }
+ }
+ dentptr++;
+ continue;
+ }
+ debug("vfatname: |%s|\n", l_name);
+ } else {
+ /* Volume label or VFAT entry */
+ dentptr++;
+ continue;
+ }
+ }
+ if (dentptr->name[0] == 0) {
+ if (dols) {
+ printf("\n%d file(s), %d dir(s)\n\n",
+ files, dirs);
+ }
+ debug("Dentname == NULL - %d\n", i);
+ return NULL;
+ }
+ if (vfat_enabled) {
+ __u8 csum = mkcksum(dentptr->name, dentptr->ext);
+ if (dols && csum == prevcksum) {
+ prevcksum = 0xffff;
+ dentptr++;
+ continue;
+ }
+ }
+
+ get_name(dentptr, s_name);
+ if (dols) {
+ int isdir = (dentptr->attr & ATTR_DIR);
+ char dirc;
+ int doit = 0;
+
+ if (isdir) {
+ dirs++;
+ dirc = '/';
+ doit = 1;
+ } else {
+ dirc = ' ';
+ if (s_name[0] != 0) {
+ files++;
+ doit = 1;
+ }
+ }
+
+ if (doit) {
+ if (dirc == ' ') {
+ printf(" %8ld %s%c\n",
+ (long)FAT2CPU32(dentptr->size),
+ s_name, dirc);
+ } else {
+ printf(" %s%c\n",
+ s_name, dirc);
+ }
+ }
+
+ dentptr++;
+ continue;
+ }
+
+ if (strcmp(filename, s_name)
+ && strcmp(filename, l_name)) {
+ debug("Mismatch: |%s|%s|\n", s_name, l_name);
+ dentptr++;
+ continue;
+ }
+
+ memcpy(retdent, dentptr, sizeof(dir_entry));
+
+ debug("DentName: %s", s_name);
+ debug(", start: 0x%x", START(dentptr));
+ debug(", size: 0x%x %s\n",
+ FAT2CPU32(dentptr->size),
+ (dentptr->attr & ATTR_DIR) ? "(DIR)" : "");
+
+ return retdent;
+ }
+
+ curclust = get_fatent(mydata, curclust);
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ printf("Invalid FAT entry\n");
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Read boot sector and volume info from a FAT filesystem
+ */
+static int
+read_bootsectandvi(boot_sector *bs, volume_info *volinfo, int *fatsize)
+{
+ __u8 *block;
+ volume_info *vistart;
+ int ret = 0;
+
+ if (cur_dev == NULL) {
+ debug("Error: no device selected\n");
+ return -1;
+ }
+
+ block = memalign(ARCH_DMA_MINALIGN, cur_dev->blksz);
+ if (block == NULL) {
+ debug("Error: allocating block\n");
+ return -1;
+ }
+
+ if (disk_read(0, 1, block) < 0) {
+ debug("Error: reading block\n");
+ goto fail;
+ }
+
+ memcpy(bs, block, sizeof(boot_sector));
+ bs->reserved = FAT2CPU16(bs->reserved);
+ bs->fat_length = FAT2CPU16(bs->fat_length);
+ bs->secs_track = FAT2CPU16(bs->secs_track);
+ bs->heads = FAT2CPU16(bs->heads);
+ bs->total_sect = FAT2CPU32(bs->total_sect);
+
+ /* FAT32 entries */
+ if (bs->fat_length == 0) {
+ /* Assume FAT32 */
+ bs->fat32_length = FAT2CPU32(bs->fat32_length);
+ bs->flags = FAT2CPU16(bs->flags);
+ bs->root_cluster = FAT2CPU32(bs->root_cluster);
+ bs->info_sector = FAT2CPU16(bs->info_sector);
+ bs->backup_boot = FAT2CPU16(bs->backup_boot);
+ vistart = (volume_info *)(block + sizeof(boot_sector));
+ *fatsize = 32;
+ } else {
+ vistart = (volume_info *)&(bs->fat32_length);
+ *fatsize = 0;
+ }
+ memcpy(volinfo, vistart, sizeof(volume_info));
+
+ if (*fatsize == 32) {
+ if (strncmp(FAT32_SIGN, vistart->fs_type, SIGNLEN) == 0)
+ goto exit;
+ } else {
+ if (strncmp(FAT12_SIGN, vistart->fs_type, SIGNLEN) == 0) {
+ *fatsize = 12;
+ goto exit;
+ }
+ if (strncmp(FAT16_SIGN, vistart->fs_type, SIGNLEN) == 0) {
+ *fatsize = 16;
+ goto exit;
+ }
+ }
+
+ debug("Error: broken fs_type sign\n");
+fail:
+ ret = -1;
+exit:
+ free(block);
+ return ret;
+}
+
+__u8 do_fat_read_at_block[MAX_CLUSTSIZE]
+ __aligned(ARCH_DMA_MINALIGN);
+
+long
+do_fat_read_at(const char *filename, unsigned long pos, void *buffer,
+ unsigned long maxsize, int dols, int dogetsize)
+{
+ char fnamecopy[2048];
+ boot_sector bs;
+ volume_info volinfo;
+ fsdata datablock;
+ fsdata *mydata = &datablock;
+ dir_entry *dentptr = NULL;
+ __u16 prevcksum = 0xffff;
+ char *subname = "";
+ __u32 cursect;
+ int idx, isdir = 0;
+ int files = 0, dirs = 0;
+ long ret = -1;
+ int firsttime;
+ __u32 root_cluster = 0;
+ int rootdir_size = 0;
+ int j;
+
+ if (read_bootsectandvi(&bs, &volinfo, &mydata->fatsize)) {
+ debug("Error: reading boot sector\n");
+ return -1;
+ }
+
+ if (mydata->fatsize == 32) {
+ root_cluster = bs.root_cluster;
+ mydata->fatlength = bs.fat32_length;
+ } else {
+ mydata->fatlength = bs.fat_length;
+ }
+
+ mydata->fat_sect = bs.reserved;
+
+ cursect = mydata->rootdir_sect
+ = mydata->fat_sect + mydata->fatlength * bs.fats;
+
+ mydata->sect_size = (bs.sector_size[1] << 8) + bs.sector_size[0];
+ mydata->clust_size = bs.cluster_size;
+ if (mydata->sect_size != cur_part_info.blksz) {
+ printf("Error: FAT sector size mismatch (fs=%hu, dev=%lu)\n",
+ mydata->sect_size, cur_part_info.blksz);
+ return -1;
+ }
+
+ if (mydata->fatsize == 32) {
+ mydata->data_begin = mydata->rootdir_sect -
+ (mydata->clust_size * 2);
+ } else {
+ rootdir_size = ((bs.dir_entries[1] * (int)256 +
+ bs.dir_entries[0]) *
+ sizeof(dir_entry)) /
+ mydata->sect_size;
+ mydata->data_begin = mydata->rootdir_sect +
+ rootdir_size -
+ (mydata->clust_size * 2);
+ }
+
+ mydata->fatbufnum = -1;
+ mydata->fatbuf = memalign(ARCH_DMA_MINALIGN, FATBUFSIZE);
+ if (mydata->fatbuf == NULL) {
+ debug("Error: allocating memory\n");
+ return -1;
+ }
+
+ if (vfat_enabled)
+ debug("VFAT Support enabled\n");
+
+ debug("FAT%d, fat_sect: %d, fatlength: %d\n",
+ mydata->fatsize, mydata->fat_sect, mydata->fatlength);
+ debug("Rootdir begins at cluster: %d, sector: %d, offset: %x\n"
+ "Data begins at: %d\n",
+ root_cluster,
+ mydata->rootdir_sect,
+ mydata->rootdir_sect * mydata->sect_size, mydata->data_begin);
+ debug("Sector size: %d, cluster size: %d\n", mydata->sect_size,
+ mydata->clust_size);
+
+ /* "cwd" is always the root... */
+ while (ISDIRDELIM(*filename))
+ filename++;
+
+ /* Make a copy of the filename and convert it to lowercase */
+ strcpy(fnamecopy, filename);
+ downcase(fnamecopy);
+
+ if (*fnamecopy == '\0') {
+ if (!dols)
+ goto exit;
+
+ dols = LS_ROOT;
+ } else if ((idx = dirdelim(fnamecopy)) >= 0) {
+ isdir = 1;
+ fnamecopy[idx] = '\0';
+ subname = fnamecopy + idx + 1;
+
+ /* Handle multiple delimiters */
+ while (ISDIRDELIM(*subname))
+ subname++;
+ } else if (dols) {
+ isdir = 1;
+ }
+
+ j = 0;
+ while (1) {
+ int i;
+
+ if (j == 0) {
+ debug("FAT read sect=%d, clust_size=%d, DIRENTSPERBLOCK=%zd\n",
+ cursect, mydata->clust_size, DIRENTSPERBLOCK);
+
+ if (disk_read(cursect,
+ (mydata->fatsize == 32) ?
+ (mydata->clust_size) :
+ PREFETCH_BLOCKS,
+ do_fat_read_at_block) < 0) {
+ debug("Error: reading rootdir block\n");
+ goto exit;
+ }
+
+ dentptr = (dir_entry *) do_fat_read_at_block;
+ }
+
+ for (i = 0; i < DIRENTSPERBLOCK; i++) {
+ char s_name[14], l_name[VFAT_MAXLEN_BYTES];
+ __u8 csum;
+
+ l_name[0] = '\0';
+ if (dentptr->name[0] == DELETED_FLAG) {
+ dentptr++;
+ continue;
+ }
+
+ if (vfat_enabled)
+ csum = mkcksum(dentptr->name, dentptr->ext);
+
+ if (dentptr->attr & ATTR_VOLUME) {
+ if (vfat_enabled &&
+ (dentptr->attr & ATTR_VFAT) == ATTR_VFAT &&
+ (dentptr->name[0] & LAST_LONG_ENTRY_MASK)) {
+ prevcksum =
+ ((dir_slot *)dentptr)->alias_checksum;
+
+ get_vfatname(mydata,
+ root_cluster,
+ do_fat_read_at_block,
+ dentptr, l_name);
+
+ if (dols == LS_ROOT) {
+ char dirc;
+ int doit = 0;
+ int isdir =
+ (dentptr->attr & ATTR_DIR);
+
+ if (isdir) {
+ dirs++;
+ dirc = '/';
+ doit = 1;
+ } else {
+ dirc = ' ';
+ if (l_name[0] != 0) {
+ files++;
+ doit = 1;
+ }
+ }
+ if (doit) {
+ if (dirc == ' ') {
+ printf(" %8ld %s%c\n",
+ (long)FAT2CPU32(dentptr->size),
+ l_name,
+ dirc);
+ } else {
+ printf(" %s%c\n",
+ l_name,
+ dirc);
+ }
+ }
+ dentptr++;
+ continue;
+ }
+ debug("Rootvfatname: |%s|\n",
+ l_name);
+ } else {
+ /* Volume label or VFAT entry */
+ dentptr++;
+ continue;
+ }
+ } else if (dentptr->name[0] == 0) {
+ debug("RootDentname == NULL - %d\n", i);
+ if (dols == LS_ROOT) {
+ printf("\n%d file(s), %d dir(s)\n\n",
+ files, dirs);
+ ret = 0;
+ }
+ goto exit;
+			} else if (vfat_enabled &&
+ dols == LS_ROOT && csum == prevcksum) {
+ prevcksum = 0xffff;
+ dentptr++;
+ continue;
+ }
+
+ get_name(dentptr, s_name);
+
+ if (dols == LS_ROOT) {
+ int isdir = (dentptr->attr & ATTR_DIR);
+ char dirc;
+ int doit = 0;
+
+ if (isdir) {
+ dirc = '/';
+ if (s_name[0] != 0) {
+ dirs++;
+ doit = 1;
+ }
+ } else {
+ dirc = ' ';
+ if (s_name[0] != 0) {
+ files++;
+ doit = 1;
+ }
+ }
+ if (doit) {
+ if (dirc == ' ') {
+ printf(" %8ld %s%c\n",
+ (long)FAT2CPU32(dentptr->size),
+ s_name, dirc);
+ } else {
+ printf(" %s%c\n",
+ s_name, dirc);
+ }
+ }
+ dentptr++;
+ continue;
+ }
+
+ if (strcmp(fnamecopy, s_name)
+ && strcmp(fnamecopy, l_name)) {
+ debug("RootMismatch: |%s|%s|\n", s_name,
+ l_name);
+ dentptr++;
+ continue;
+ }
+
+ if (isdir && !(dentptr->attr & ATTR_DIR))
+ goto exit;
+
+ debug("RootName: %s", s_name);
+ debug(", start: 0x%x", START(dentptr));
+ debug(", size: 0x%x %s\n",
+ FAT2CPU32(dentptr->size),
+ isdir ? "(DIR)" : "");
+
+ goto rootdir_done; /* We got a match */
+ }
+ debug("END LOOP: j=%d clust_size=%d\n", j,
+ mydata->clust_size);
+
+ /*
+ * On FAT32 we must fetch the FAT entries for the next
+ * root directory clusters when a cluster has been
+ * completely processed.
+ */
+ ++j;
+ int rootdir_end = 0;
+ if (mydata->fatsize == 32) {
+ if (j == mydata->clust_size) {
+ int nxtsect = 0;
+ int nxt_clust = 0;
+
+ nxt_clust = get_fatent(mydata, root_cluster);
+ rootdir_end = CHECK_CLUST(nxt_clust, 32);
+
+ nxtsect = mydata->data_begin +
+ (nxt_clust * mydata->clust_size);
+
+ root_cluster = nxt_clust;
+
+ cursect = nxtsect;
+ j = 0;
+ }
+ } else {
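+			/*
+			 * FAT12/16: the root directory has a fixed size, so
+			 * simply advance sector by sector and stop once
+			 * rootdir_size sectors have been scanned.
+			 */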
+ if (j == PREFETCH_BLOCKS)
+ j = 0;
+
+ rootdir_end = (++cursect - mydata->rootdir_sect >=
+ rootdir_size);
+ }
+
+ /* If end of rootdir reached */
+ if (rootdir_end) {
+ if (dols == LS_ROOT) {
+ printf("\n%d file(s), %d dir(s)\n\n",
+ files, dirs);
+ ret = 0;
+ }
+ goto exit;
+ }
+ }
+rootdir_done:
+
+ firsttime = 1;
+
+ while (isdir) {
+ int startsect = mydata->data_begin
+ + START(dentptr) * mydata->clust_size;
+ dir_entry dent;
+ char *nextname = NULL;
+
+ dent = *dentptr;
+ dentptr = &dent;
+
+ idx = dirdelim(subname);
+
+ if (idx >= 0) {
+ subname[idx] = '\0';
+ nextname = subname + idx + 1;
+ /* Handle multiple delimiters */
+ while (ISDIRDELIM(*nextname))
+ nextname++;
+ if (dols && *nextname == '\0')
+ firsttime = 0;
+ } else {
+ if (dols && firsttime) {
+ firsttime = 0;
+ } else {
+ isdir = 0;
+ }
+ }
+
+ if (get_dentfromdir(mydata, startsect, subname, dentptr,
+ isdir ? 0 : dols) == NULL) {
+ if (dols && !isdir)
+ ret = 0;
+ goto exit;
+ }
+
+ if (isdir && !(dentptr->attr & ATTR_DIR))
+ goto exit;
+
+ if (idx >= 0)
+ subname = nextname;
+ }
+
+ if (dogetsize)
+ ret = FAT2CPU32(dentptr->size);
+ else
+ ret = get_contents(mydata, dentptr, pos, buffer, maxsize);
+ debug("Size: %d, got: %ld\n", FAT2CPU32(dentptr->size), ret);
+
+exit:
+ free(mydata->fatbuf);
+ return ret;
+}
+
+long
+do_fat_read(const char *filename, void *buffer, unsigned long maxsize, int dols)
+{
+ return do_fat_read_at(filename, 0, buffer, maxsize, dols, 0);
+}
+
+int file_fat_detectfs(void)
+{
+ boot_sector bs;
+ volume_info volinfo;
+ int fatsize;
+ char vol_label[12];
+
+ if (cur_dev == NULL) {
+ printf("No current device\n");
+ return 1;
+ }
+
+#if defined(CONFIG_CMD_IDE) || \
+ defined(CONFIG_CMD_SATA) || \
+ defined(CONFIG_CMD_SCSI) || \
+ defined(CONFIG_CMD_USB) || \
+ defined(CONFIG_MMC)
+ printf("Interface: ");
+ switch (cur_dev->if_type) {
+ case IF_TYPE_IDE:
+ printf("IDE");
+ break;
+ case IF_TYPE_SATA:
+ printf("SATA");
+ break;
+ case IF_TYPE_SCSI:
+ printf("SCSI");
+ break;
+ case IF_TYPE_ATAPI:
+ printf("ATAPI");
+ break;
+ case IF_TYPE_USB:
+ printf("USB");
+ break;
+ case IF_TYPE_DOC:
+ printf("DOC");
+ break;
+ case IF_TYPE_MMC:
+ printf("MMC");
+ break;
+ default:
+ printf("Unknown");
+ }
+
+ printf("\n Device %d: ", cur_dev->dev);
+ dev_print(cur_dev);
+#endif
+
+ if (read_bootsectandvi(&bs, &volinfo, &fatsize)) {
+ printf("\nNo valid FAT fs found\n");
+ return 1;
+ }
+
+ memcpy(vol_label, volinfo.volume_label, 11);
+ vol_label[11] = '\0';
+ volinfo.fs_type[5] = '\0';
+
+ printf("Filesystem: %s \"%s\"\n", volinfo.fs_type, vol_label);
+
+ return 0;
+}
+
+int file_fat_ls(const char *dir)
+{
+ return do_fat_read(dir, NULL, 0, LS_YES);
+}
+
+int fat_exists(const char *filename)
+{
+ int sz;
+ sz = do_fat_read_at(filename, 0, NULL, 0, LS_NO, 1);
+ return sz >= 0;
+}
+
+long file_fat_read_at(const char *filename, unsigned long pos, void *buffer,
+ unsigned long maxsize)
+{
+ printf("reading %s\n", filename);
+ return do_fat_read_at(filename, pos, buffer, maxsize, LS_NO, 0);
+}
+
+long file_fat_read(const char *filename, void *buffer, unsigned long maxsize)
+{
+ return file_fat_read_at(filename, 0, buffer, maxsize);
+}
+
+int fat_read_file(const char *filename, void *buf, int offset, int len)
+{
+ int len_read;
+
+ len_read = file_fat_read_at(filename, offset, buf, len);
+ if (len_read == -1) {
+ printf("** Unable to read file %s **\n", filename);
+ return -1;
+ }
+
+ return len_read;
+}
+
+void fat_close(void)
+{
+}
diff --git a/qemu/roms/u-boot/fs/fat/fat_write.c b/qemu/roms/u-boot/fs/fat/fat_write.c
new file mode 100644
index 000000000..ba7e3aeb0
--- /dev/null
+++ b/qemu/roms/u-boot/fs/fat/fat_write.c
@@ -0,0 +1,1106 @@
+/*
+ * fat_write.c
+ *
+ * R/W (V)FAT 12/16/32 filesystem implementation by Donggeun Kim
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <command.h>
+#include <config.h>
+#include <fat.h>
+#include <asm/byteorder.h>
+#include <part.h>
+#include <linux/ctype.h>
+#include "fat.c"
+
+static void uppercase(char *str, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ *str = toupper(*str);
+ str++;
+ }
+}
+
+static int total_sector;
+static int disk_write(__u32 block, __u32 nr_blocks, void *buf)
+{
+ if (!cur_dev || !cur_dev->block_write)
+ return -1;
+
+ if (cur_part_info.start + block + nr_blocks >
+ cur_part_info.start + total_sector) {
+ printf("error: overflow occurs\n");
+ return -1;
+ }
+
+ return cur_dev->block_write(cur_dev->dev,
+ cur_part_info.start + block, nr_blocks, buf);
+}
+
+/*
+ * Set short name in directory entry
+ */
+static void set_name(dir_entry *dirent, const char *filename)
+{
+ char s_name[VFAT_MAXLEN_BYTES];
+ char *period;
+ int period_location, len, i, ext_num;
+
+ if (filename == NULL)
+ return;
+
+ len = strlen(filename);
+ if (len == 0)
+ return;
+
+ strcpy(s_name, filename);
+ uppercase(s_name, len);
+
+ period = strchr(s_name, '.');
+ if (period == NULL) {
+ period_location = len;
+ ext_num = 0;
+ } else {
+ period_location = period - s_name;
+ ext_num = len - period_location - 1;
+ }
+
+ /* Pad spaces when the length of file name is shorter than eight */
+ if (period_location < 8) {
+ memcpy(dirent->name, s_name, period_location);
+ for (i = period_location; i < 8; i++)
+ dirent->name[i] = ' ';
+ } else if (period_location == 8) {
+ memcpy(dirent->name, s_name, period_location);
+ } else {
+ memcpy(dirent->name, s_name, 6);
+ dirent->name[6] = '~';
+ dirent->name[7] = '1';
+ }
+
+ if (ext_num < 3) {
+ memcpy(dirent->ext, s_name + period_location + 1, ext_num);
+ for (i = ext_num; i < 3; i++)
+ dirent->ext[i] = ' ';
+ } else
+ memcpy(dirent->ext, s_name + period_location + 1, 3);
+
+ debug("name : %s\n", dirent->name);
+ debug("ext : %s\n", dirent->ext);
+}
+
+static __u8 num_of_fats;
+/*
+ * Write fat buffer into block device
+ */
+static int flush_fat_buffer(fsdata *mydata)
+{
+ int getsize = FATBUFBLOCKS;
+ __u32 fatlength = mydata->fatlength;
+ __u8 *bufptr = mydata->fatbuf;
+ __u32 startblock = mydata->fatbufnum * FATBUFBLOCKS;
+
+ startblock += mydata->fat_sect;
+
+ if (getsize > fatlength)
+ getsize = fatlength;
+
+ /* Write FAT buf */
+ if (disk_write(startblock, getsize, bufptr) < 0) {
+ debug("error: writing FAT blocks\n");
+ return -1;
+ }
+
+ if (num_of_fats == 2) {
+ /* Update corresponding second FAT blocks */
+ startblock += mydata->fatlength;
+ if (disk_write(startblock, getsize, bufptr) < 0) {
+ debug("error: writing second FAT blocks\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Get the entry at index 'entry' in a FAT (12/16/32) table.
+ * On failure 0x00 is returned.
+ * When bufnum is changed, write back the previous fatbuf to the disk.
+ */
+static __u32 get_fatent_value(fsdata *mydata, __u32 entry)
+{
+ __u32 bufnum;
+ __u32 off16, offset;
+ __u32 ret = 0x00;
+ __u16 val1, val2;
+
+ if (CHECK_CLUST(entry, mydata->fatsize)) {
+ printf("Error: Invalid FAT entry: 0x%08x\n", entry);
+ return ret;
+ }
+
+ switch (mydata->fatsize) {
+ case 32:
+ bufnum = entry / FAT32BUFSIZE;
+ offset = entry - bufnum * FAT32BUFSIZE;
+ break;
+ case 16:
+ bufnum = entry / FAT16BUFSIZE;
+ offset = entry - bufnum * FAT16BUFSIZE;
+ break;
+ case 12:
+ bufnum = entry / FAT12BUFSIZE;
+ offset = entry - bufnum * FAT12BUFSIZE;
+ break;
+
+ default:
+ /* Unsupported FAT size */
+ return ret;
+ }
+
+ debug("FAT%d: entry: 0x%04x = %d, offset: 0x%04x = %d\n",
+ mydata->fatsize, entry, entry, offset, offset);
+
+ /* Read a new block of FAT entries into the cache. */
+ if (bufnum != mydata->fatbufnum) {
+ int getsize = FATBUFBLOCKS;
+ __u8 *bufptr = mydata->fatbuf;
+ __u32 fatlength = mydata->fatlength;
+ __u32 startblock = bufnum * FATBUFBLOCKS;
+
+ if (getsize > fatlength)
+ getsize = fatlength;
+
+ fatlength *= mydata->sect_size; /* We want it in bytes now */
+ startblock += mydata->fat_sect; /* Offset from start of disk */
+
+ /* Write back the fatbuf to the disk */
+ if (mydata->fatbufnum != -1) {
+ if (flush_fat_buffer(mydata) < 0)
+ return -1;
+ }
+
+ if (disk_read(startblock, getsize, bufptr) < 0) {
+ debug("Error reading FAT blocks\n");
+ return ret;
+ }
+ mydata->fatbufnum = bufnum;
+ }
+
+ /* Get the actual entry from the table */
+ switch (mydata->fatsize) {
+ case 32:
+ ret = FAT2CPU32(((__u32 *) mydata->fatbuf)[offset]);
+ break;
+ case 16:
+ ret = FAT2CPU16(((__u16 *) mydata->fatbuf)[offset]);
+ break;
+ case 12:
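+		/*
+		 * FAT12 entries are 12 bits wide, so two entries share three
+		 * bytes.  'off16' indexes the 16-bit word where the entry
+		 * starts and the low two bits of 'offset' select how the
+		 * 12-bit value straddles the word boundary.
+		 */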
+ off16 = (offset * 3) / 4;
+
+ switch (offset & 0x3) {
+ case 0:
+ ret = FAT2CPU16(((__u16 *) mydata->fatbuf)[off16]);
+ ret &= 0xfff;
+ break;
+ case 1:
+ val1 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16]);
+ val1 &= 0xf000;
+ val2 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16 + 1]);
+ val2 &= 0x00ff;
+ ret = (val2 << 4) | (val1 >> 12);
+ break;
+ case 2:
+ val1 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16]);
+ val1 &= 0xff00;
+ val2 = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16 + 1]);
+ val2 &= 0x000f;
+ ret = (val2 << 8) | (val1 >> 8);
+ break;
+ case 3:
+ ret = FAT2CPU16(((__u16 *)mydata->fatbuf)[off16]);
+ ret = (ret & 0xfff0) >> 4;
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ debug("FAT%d: ret: %08x, entry: %08x, offset: %04x\n",
+ mydata->fatsize, ret, entry, offset);
+
+ return ret;
+}
+
+/*
+ * Set the file name information from 'name' into 'slotptr'.
+ * Return 1 when the whole name has been consumed, 0 if more slots are needed.
+ */
+static int str2slot(dir_slot *slotptr, const char *name, int *idx)
+{
+ int j, end_idx = 0;
+
+ for (j = 0; j <= 8; j += 2) {
+ if (name[*idx] == 0x00) {
+ slotptr->name0_4[j] = 0;
+ slotptr->name0_4[j + 1] = 0;
+ end_idx++;
+ goto name0_4;
+ }
+ slotptr->name0_4[j] = name[*idx];
+ (*idx)++;
+ end_idx++;
+ }
+ for (j = 0; j <= 10; j += 2) {
+ if (name[*idx] == 0x00) {
+ slotptr->name5_10[j] = 0;
+ slotptr->name5_10[j + 1] = 0;
+ end_idx++;
+ goto name5_10;
+ }
+ slotptr->name5_10[j] = name[*idx];
+ (*idx)++;
+ end_idx++;
+ }
+ for (j = 0; j <= 2; j += 2) {
+ if (name[*idx] == 0x00) {
+ slotptr->name11_12[j] = 0;
+ slotptr->name11_12[j + 1] = 0;
+ end_idx++;
+ goto name11_12;
+ }
+ slotptr->name11_12[j] = name[*idx];
+ (*idx)++;
+ end_idx++;
+ }
+
+ if (name[*idx] == 0x00)
+ return 1;
+
+ return 0;
+/* Unused characters are filled with 0xff 0xff */
+name0_4:
+ for (; end_idx < 5; end_idx++) {
+ slotptr->name0_4[end_idx * 2] = 0xff;
+ slotptr->name0_4[end_idx * 2 + 1] = 0xff;
+ }
+ end_idx = 5;
+name5_10:
+ end_idx -= 5;
+ for (; end_idx < 6; end_idx++) {
+ slotptr->name5_10[end_idx * 2] = 0xff;
+ slotptr->name5_10[end_idx * 2 + 1] = 0xff;
+ }
+ end_idx = 11;
+name11_12:
+ end_idx -= 11;
+ for (; end_idx < 2; end_idx++) {
+ slotptr->name11_12[end_idx * 2] = 0xff;
+ slotptr->name11_12[end_idx * 2 + 1] = 0xff;
+ }
+
+ return 1;
+}
+
+static int is_next_clust(fsdata *mydata, dir_entry *dentptr);
+static void flush_dir_table(fsdata *mydata, dir_entry **dentptr);
+
+/*
+ * Fill dir_slot entries with the appropriate name, id and attr.
+ * The position for the short-name directory entry is returned via 'dentptr'.
+ */
+static void
+fill_dir_slot(fsdata *mydata, dir_entry **dentptr, const char *l_name)
+{
+ dir_slot *slotptr = (dir_slot *)get_contents_vfatname_block;
+ __u8 counter = 0, checksum;
+ int idx = 0, ret;
+ char s_name[16];
+
+ /* Get short file name and checksum value */
+ strncpy(s_name, (*dentptr)->name, 16);
+ checksum = mkcksum((*dentptr)->name, (*dentptr)->ext);
+
+ do {
+ memset(slotptr, 0x00, sizeof(dir_slot));
+ ret = str2slot(slotptr, l_name, &idx);
+ slotptr->id = ++counter;
+ slotptr->attr = ATTR_VFAT;
+ slotptr->alias_checksum = checksum;
+ slotptr++;
+ } while (ret == 0);
+
+ slotptr--;
+ slotptr->id |= LAST_LONG_ENTRY_MASK;
+
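+	/*
+	 * The slots above were built in increasing sequence order; on disk
+	 * they are stored in reverse (highest sequence number first) right
+	 * before the short-name entry, so copy them out backwards.
+	 */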
+ while (counter >= 1) {
+ if (is_next_clust(mydata, *dentptr)) {
+ /* A new cluster is allocated for directory table */
+ flush_dir_table(mydata, dentptr);
+ }
+ memcpy(*dentptr, slotptr, sizeof(dir_slot));
+ (*dentptr)++;
+ slotptr--;
+ counter--;
+ }
+
+ if (is_next_clust(mydata, *dentptr)) {
+ /* A new cluster is allocated for directory table */
+ flush_dir_table(mydata, dentptr);
+ }
+}
+
+static __u32 dir_curclust;
+
+/*
+ * Extract the full long filename starting at 'retdent' (which is really
+ * a slot) into 'l_name'. If successful, also return the real directory
+ * entry via 'retdent'.
+ * If an additional adjacent cluster of directory entries has to be read
+ * into memory, 'get_contents_vfatname_block' is copied into
+ * 'get_dentfromdir_block' and the location of the real directory entry
+ * within that buffer is returned via 'retdent'.
+ * Return 0 on success, -1 otherwise.
+ */
+static int
+get_long_file_name(fsdata *mydata, int curclust, __u8 *cluster,
+ dir_entry **retdent, char *l_name)
+{
+ dir_entry *realdent;
+ dir_slot *slotptr = (dir_slot *)(*retdent);
+ dir_slot *slotptr2 = NULL;
+ __u8 *buflimit = cluster + mydata->sect_size * ((curclust == 0) ?
+ PREFETCH_BLOCKS :
+ mydata->clust_size);
+ __u8 counter = (slotptr->id & ~LAST_LONG_ENTRY_MASK) & 0xff;
+ int idx = 0, cur_position = 0;
+
+ if (counter > VFAT_MAXSEQ) {
+ debug("Error: VFAT name is too long\n");
+ return -1;
+ }
+
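+	/*
+	 * The first slot carries the sequence count; step over that many
+	 * slots to reach the short-name entry that follows the long-name
+	 * slots in this buffer.
+	 */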
+ while ((__u8 *)slotptr < buflimit) {
+ if (counter == 0)
+ break;
+ if (((slotptr->id & ~LAST_LONG_ENTRY_MASK) & 0xff) != counter)
+ return -1;
+ slotptr++;
+ counter--;
+ }
+
+ if ((__u8 *)slotptr >= buflimit) {
+ if (curclust == 0)
+ return -1;
+ curclust = get_fatent_value(mydata, dir_curclust);
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ printf("Invalid FAT entry\n");
+ return -1;
+ }
+
+ dir_curclust = curclust;
+
+ if (get_cluster(mydata, curclust, get_contents_vfatname_block,
+ mydata->clust_size * mydata->sect_size) != 0) {
+ debug("Error: reading directory block\n");
+ return -1;
+ }
+
+ slotptr2 = (dir_slot *)get_contents_vfatname_block;
+ while (counter > 0) {
+ if (((slotptr2->id & ~LAST_LONG_ENTRY_MASK)
+ & 0xff) != counter)
+ return -1;
+ slotptr2++;
+ counter--;
+ }
+
+ /* Save the real directory entry */
+ realdent = (dir_entry *)slotptr2;
+ while ((__u8 *)slotptr2 > get_contents_vfatname_block) {
+ slotptr2--;
+ slot2str(slotptr2, l_name, &idx);
+ }
+ } else {
+ /* Save the real directory entry */
+ realdent = (dir_entry *)slotptr;
+ }
+
+ do {
+ slotptr--;
+ if (slot2str(slotptr, l_name, &idx))
+ break;
+ } while (!(slotptr->id & LAST_LONG_ENTRY_MASK));
+
+ l_name[idx] = '\0';
+ if (*l_name == DELETED_FLAG)
+ *l_name = '\0';
+ else if (*l_name == aRING)
+ *l_name = DELETED_FLAG;
+ downcase(l_name);
+
+ /* Return the real directory entry */
+ *retdent = realdent;
+
+ if (slotptr2) {
+ memcpy(get_dentfromdir_block, get_contents_vfatname_block,
+ mydata->clust_size * mydata->sect_size);
+ cur_position = (__u8 *)realdent - get_contents_vfatname_block;
+ *retdent = (dir_entry *) &get_dentfromdir_block[cur_position];
+ }
+
+ return 0;
+}
+
+/*
+ * Set the entry at index 'entry' in a FAT (16/32) table.
+ */
+static int set_fatent_value(fsdata *mydata, __u32 entry, __u32 entry_value)
+{
+ __u32 bufnum, offset;
+
+ switch (mydata->fatsize) {
+ case 32:
+ bufnum = entry / FAT32BUFSIZE;
+ offset = entry - bufnum * FAT32BUFSIZE;
+ break;
+ case 16:
+ bufnum = entry / FAT16BUFSIZE;
+ offset = entry - bufnum * FAT16BUFSIZE;
+ break;
+ default:
+ /* Unsupported FAT size */
+ return -1;
+ }
+
+ /* Read a new block of FAT entries into the cache. */
+ if (bufnum != mydata->fatbufnum) {
+ int getsize = FATBUFBLOCKS;
+ __u8 *bufptr = mydata->fatbuf;
+ __u32 fatlength = mydata->fatlength;
+ __u32 startblock = bufnum * FATBUFBLOCKS;
+
+ fatlength *= mydata->sect_size;
+ startblock += mydata->fat_sect;
+
+ if (getsize > fatlength)
+ getsize = fatlength;
+
+ if (mydata->fatbufnum != -1) {
+ if (flush_fat_buffer(mydata) < 0)
+ return -1;
+ }
+
+ if (disk_read(startblock, getsize, bufptr) < 0) {
+ debug("Error reading FAT blocks\n");
+ return -1;
+ }
+ mydata->fatbufnum = bufnum;
+ }
+
+ /* Set the actual entry */
+ switch (mydata->fatsize) {
+ case 32:
+ ((__u32 *) mydata->fatbuf)[offset] = cpu_to_le32(entry_value);
+ break;
+ case 16:
+ ((__u16 *) mydata->fatbuf)[offset] = cpu_to_le16(entry_value);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Find the next free entry after 'entry' in a FAT (16/32) table,
+ * link 'entry' to it and return the newly allocated cluster number
+ */
+static __u32 determine_fatent(fsdata *mydata, __u32 entry)
+{
+ __u32 next_fat, next_entry = entry + 1;
+
+ while (1) {
+ next_fat = get_fatent_value(mydata, next_entry);
+ if (next_fat == 0) {
+ set_fatent_value(mydata, entry, next_entry);
+ break;
+ }
+ next_entry++;
+ }
+ debug("FAT%d: entry: %08x, entry_value: %04x\n",
+ mydata->fatsize, entry, next_entry);
+
+ return next_entry;
+}
+
+/*
+ * Write at most 'size' bytes from 'buffer' into the specified cluster.
+ * Return 0 on success, -1 otherwise.
+ */
+static int
+set_cluster(fsdata *mydata, __u32 clustnum, __u8 *buffer,
+ unsigned long size)
+{
+ int idx = 0;
+ __u32 startsect;
+
+ if (clustnum > 0)
+ startsect = mydata->data_begin +
+ clustnum * mydata->clust_size;
+ else
+ startsect = mydata->rootdir_sect;
+
+ debug("clustnum: %d, startsect: %d\n", clustnum, startsect);
+
+ if ((size / mydata->sect_size) > 0) {
+ if (disk_write(startsect, size / mydata->sect_size, buffer) < 0) {
+ debug("Error writing data\n");
+ return -1;
+ }
+ }
+
+ if (size % mydata->sect_size) {
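+		/*
+		 * The tail that does not fill a whole sector is copied into
+		 * a temporary buffer so that a complete sector can be passed
+		 * to the block driver.
+		 */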
+ __u8 tmpbuf[mydata->sect_size];
+
+ idx = size / mydata->sect_size;
+ buffer += idx * mydata->sect_size;
+ memcpy(tmpbuf, buffer, size % mydata->sect_size);
+
+ if (disk_write(startsect + idx, 1, tmpbuf) < 0) {
+ debug("Error writing data\n");
+ return -1;
+ }
+
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Find the first empty cluster
+ */
+static int find_empty_cluster(fsdata *mydata)
+{
+ __u32 fat_val, entry = 3;
+
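+	/*
+	 * FAT entries 0 and 1 are reserved and cluster 2 is the first data
+	 * cluster (on FAT32 it typically holds the root directory), so the
+	 * scan for a free entry starts at cluster 3.
+	 */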
+ while (1) {
+ fat_val = get_fatent_value(mydata, entry);
+ if (fat_val == 0)
+ break;
+ entry++;
+ }
+
+ return entry;
+}
+
+/*
+ * Write the directory entries in 'get_dentfromdir_block' to the block
+ * device, allocate a new cluster for the directory table and reset the
+ * buffer for further entries
+ */
+static void flush_dir_table(fsdata *mydata, dir_entry **dentptr)
+{
+ int dir_newclust = 0;
+
+ if (set_cluster(mydata, dir_curclust,
+ get_dentfromdir_block,
+ mydata->clust_size * mydata->sect_size) != 0) {
+		printf("error: writing directory entry\n");
+ return;
+ }
+ dir_newclust = find_empty_cluster(mydata);
+ set_fatent_value(mydata, dir_curclust, dir_newclust);
+ if (mydata->fatsize == 32)
+ set_fatent_value(mydata, dir_newclust, 0xffffff8);
+ else if (mydata->fatsize == 16)
+ set_fatent_value(mydata, dir_newclust, 0xfff8);
+
+ dir_curclust = dir_newclust;
+
+ if (flush_fat_buffer(mydata) < 0)
+ return;
+
+ memset(get_dentfromdir_block, 0x00,
+ mydata->clust_size * mydata->sect_size);
+
+ *dentptr = (dir_entry *) get_dentfromdir_block;
+}
+
+/*
+ * Clear the FAT entries (mark the clusters free) from 'entry'
+ * to the end of the file
+ */
+static int clear_fatent(fsdata *mydata, __u32 entry)
+{
+ __u32 fat_val;
+
+ while (1) {
+ fat_val = get_fatent_value(mydata, entry);
+ if (fat_val != 0)
+ set_fatent_value(mydata, entry, 0);
+ else
+ break;
+
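+		/* 0xfffffff (FAT32) and 0xffff (FAT16) mark the end of the chain */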
+ if (fat_val == 0xfffffff || fat_val == 0xffff)
+ break;
+
+ entry = fat_val;
+ }
+
+ /* Flush fat buffer */
+ if (flush_fat_buffer(mydata) < 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Write at most 'maxsize' bytes from 'buffer' into
+ * the file associated with 'dentptr'.
+ * Return the number of bytes written or -1 on fatal errors.
+ */
+static int
+set_contents(fsdata *mydata, dir_entry *dentptr, __u8 *buffer,
+ unsigned long maxsize)
+{
+ unsigned long filesize = FAT2CPU32(dentptr->size), gotsize = 0;
+ unsigned int bytesperclust = mydata->clust_size * mydata->sect_size;
+ __u32 curclust = START(dentptr);
+ __u32 endclust = 0, newclust = 0;
+ unsigned long actsize;
+
+ debug("Filesize: %ld bytes\n", filesize);
+
+ if (maxsize > 0 && filesize > maxsize)
+ filesize = maxsize;
+
+ debug("%ld bytes\n", filesize);
+
+ actsize = bytesperclust;
+ endclust = curclust;
+ do {
+ /* search for consecutive clusters */
+ while (actsize < filesize) {
+ newclust = determine_fatent(mydata, endclust);
+
+ if ((newclust - 1) != endclust)
+ goto getit;
+
+ if (CHECK_CLUST(newclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", newclust);
+ debug("Invalid FAT entry\n");
+ return gotsize;
+ }
+ endclust = newclust;
+ actsize += bytesperclust;
+ }
+ /* actsize >= file size */
+ actsize -= bytesperclust;
+ /* set remaining clusters */
+ if (set_cluster(mydata, curclust, buffer, (int)actsize) != 0) {
+ debug("error: writing cluster\n");
+ return -1;
+ }
+
+ /* set remaining bytes */
+ gotsize += (int)actsize;
+ filesize -= actsize;
+ buffer += actsize;
+ actsize = filesize;
+
+ if (set_cluster(mydata, endclust, buffer, (int)actsize) != 0) {
+ debug("error: writing cluster\n");
+ return -1;
+ }
+ gotsize += actsize;
+
+ /* Mark end of file in FAT */
+ if (mydata->fatsize == 16)
+ newclust = 0xffff;
+ else if (mydata->fatsize == 32)
+ newclust = 0xfffffff;
+ set_fatent_value(mydata, endclust, newclust);
+
+ return gotsize;
+getit:
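+		/*
+		 * The next allocated cluster is not adjacent: flush the run
+		 * of consecutive clusters gathered so far and continue from
+		 * the newly allocated cluster.
+		 */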
+ if (set_cluster(mydata, curclust, buffer, (int)actsize) != 0) {
+ debug("error: writing cluster\n");
+ return -1;
+ }
+ gotsize += (int)actsize;
+ filesize -= actsize;
+ buffer += actsize;
+
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ debug("Invalid FAT entry\n");
+ return gotsize;
+ }
+ actsize = bytesperclust;
+ curclust = endclust = newclust;
+ } while (1);
+}
+
+/*
+ * Fill dir_entry
+ */
+static void fill_dentry(fsdata *mydata, dir_entry *dentptr,
+ const char *filename, __u32 start_cluster, __u32 size, __u8 attr)
+{
+ if (mydata->fatsize == 32)
+ dentptr->starthi =
+ cpu_to_le16((start_cluster & 0xffff0000) >> 16);
+ dentptr->start = cpu_to_le16(start_cluster & 0xffff);
+ dentptr->size = cpu_to_le32(size);
+
+ dentptr->attr = attr;
+
+ set_name(dentptr, filename);
+}
+
+/*
+ * Check whether adding a file would make the file system exceed
+ * the size of the block device.
+ * Return -1 when an overflow would occur, otherwise return 0
+ */
+static int check_overflow(fsdata *mydata, __u32 clustnum, unsigned long size)
+{
+ __u32 startsect, sect_num;
+
+ if (clustnum > 0) {
+ startsect = mydata->data_begin +
+ clustnum * mydata->clust_size;
+ } else {
+ startsect = mydata->rootdir_sect;
+ }
+
+ sect_num = size / mydata->sect_size;
+ if (size % mydata->sect_size)
+ sect_num++;
+
+ if (startsect + sect_num > cur_part_info.start + total_sector)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Check whether the directory entry pointer has crossed the cluster
+ * boundary, i.e. whether a new cluster is needed for further entries
+ */
+static int is_next_clust(fsdata *mydata, dir_entry *dentptr)
+{
+ int cur_position;
+
+ cur_position = (__u8 *)dentptr - get_dentfromdir_block;
+
+ if (cur_position >= mydata->clust_size * mydata->sect_size)
+ return 1;
+ else
+ return 0;
+}
+
+static dir_entry *empty_dentptr;
+/*
+ * Find a directory entry based on its filename or start cluster number.
+ * If the directory entry is not found, the position for writing a new
+ * directory entry is stored in 'empty_dentptr' and NULL is returned
+ */
+static dir_entry *find_directory_entry(fsdata *mydata, int startsect,
+ char *filename, dir_entry *retdent, __u32 start)
+{
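+	/* Convert the start sector back into the cluster number it belongs to */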
+ __u32 curclust = (startsect - mydata->data_begin) / mydata->clust_size;
+
+ debug("get_dentfromdir: %s\n", filename);
+
+ while (1) {
+ dir_entry *dentptr;
+
+ int i;
+
+ if (get_cluster(mydata, curclust, get_dentfromdir_block,
+ mydata->clust_size * mydata->sect_size) != 0) {
+ printf("Error: reading directory block\n");
+ return NULL;
+ }
+
+ dentptr = (dir_entry *)get_dentfromdir_block;
+
+ dir_curclust = curclust;
+
+ for (i = 0; i < DIRENTSPERCLUST; i++) {
+ char s_name[14], l_name[VFAT_MAXLEN_BYTES];
+
+ l_name[0] = '\0';
+ if (dentptr->name[0] == DELETED_FLAG) {
+ dentptr++;
+ if (is_next_clust(mydata, dentptr))
+ break;
+ continue;
+ }
+ if ((dentptr->attr & ATTR_VOLUME)) {
+ if (vfat_enabled &&
+ (dentptr->attr & ATTR_VFAT) &&
+ (dentptr->name[0] & LAST_LONG_ENTRY_MASK)) {
+ get_long_file_name(mydata, curclust,
+ get_dentfromdir_block,
+ &dentptr, l_name);
+ debug("vfatname: |%s|\n", l_name);
+ } else {
+ /* Volume label or VFAT entry */
+ dentptr++;
+ if (is_next_clust(mydata, dentptr))
+ break;
+ continue;
+ }
+ }
+ if (dentptr->name[0] == 0) {
+ debug("Dentname == NULL - %d\n", i);
+ empty_dentptr = dentptr;
+ return NULL;
+ }
+
+ get_name(dentptr, s_name);
+
+ if (strcmp(filename, s_name)
+ && strcmp(filename, l_name)) {
+ debug("Mismatch: |%s|%s|\n",
+ s_name, l_name);
+ dentptr++;
+ if (is_next_clust(mydata, dentptr))
+ break;
+ continue;
+ }
+
+ memcpy(retdent, dentptr, sizeof(dir_entry));
+
+ debug("DentName: %s", s_name);
+ debug(", start: 0x%x", START(dentptr));
+ debug(", size: 0x%x %s\n",
+ FAT2CPU32(dentptr->size),
+ (dentptr->attr & ATTR_DIR) ?
+ "(DIR)" : "");
+
+ return dentptr;
+ }
+
+ /*
+		 * In FAT16/12 the root dir is located before the data area,
+		 * as shown below:
+		 * -------------------------------------------------------------
+		 * | Boot | FAT1 & 2 | Root dir | Data (starts at cluster #2) |
+		 * -------------------------------------------------------------
+		 *
+		 * As a result, if curclust lies in the root dir it is a
+		 * negative number, 0 or 1.
+ *
+ */
+ if (mydata->fatsize != 32 && (int)curclust <= 1) {
+ /* Current clust is in root dir, set to next clust */
+ curclust++;
+ if ((int)curclust <= 1)
+ continue; /* continue to find */
+
+ /* Reach the end of root dir */
+ empty_dentptr = dentptr;
+ return NULL;
+ }
+
+ curclust = get_fatent_value(mydata, dir_curclust);
+ if (IS_LAST_CLUST(curclust, mydata->fatsize)) {
+ empty_dentptr = dentptr;
+ return NULL;
+ }
+ if (CHECK_CLUST(curclust, mydata->fatsize)) {
+ debug("curclust: 0x%x\n", curclust);
+ debug("Invalid FAT entry\n");
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+static int do_fat_write(const char *filename, void *buffer,
+ unsigned long size)
+{
+ dir_entry *dentptr, *retdent;
+ __u32 startsect;
+ __u32 start_cluster;
+ boot_sector bs;
+ volume_info volinfo;
+ fsdata datablock;
+ fsdata *mydata = &datablock;
+ int cursect;
+ int ret = -1, name_len;
+ char l_filename[VFAT_MAXLEN_BYTES];
+ int write_size = size;
+
+ dir_curclust = 0;
+
+ if (read_bootsectandvi(&bs, &volinfo, &mydata->fatsize)) {
+ debug("error: reading boot sector\n");
+ return -1;
+ }
+
+ total_sector = bs.total_sect;
+ if (total_sector == 0)
+ total_sector = cur_part_info.size;
+
+ if (mydata->fatsize == 32)
+ mydata->fatlength = bs.fat32_length;
+ else
+ mydata->fatlength = bs.fat_length;
+
+ mydata->fat_sect = bs.reserved;
+
+ cursect = mydata->rootdir_sect
+ = mydata->fat_sect + mydata->fatlength * bs.fats;
+ num_of_fats = bs.fats;
+
+ mydata->sect_size = (bs.sector_size[1] << 8) + bs.sector_size[0];
+ mydata->clust_size = bs.cluster_size;
+
+ if (mydata->fatsize == 32) {
+ mydata->data_begin = mydata->rootdir_sect -
+ (mydata->clust_size * 2);
+ } else {
+ int rootdir_size;
+
+ rootdir_size = ((bs.dir_entries[1] * (int)256 +
+ bs.dir_entries[0]) *
+ sizeof(dir_entry)) /
+ mydata->sect_size;
+ mydata->data_begin = mydata->rootdir_sect +
+ rootdir_size -
+ (mydata->clust_size * 2);
+ }
+
+ mydata->fatbufnum = -1;
+ mydata->fatbuf = memalign(ARCH_DMA_MINALIGN, FATBUFSIZE);
+ if (mydata->fatbuf == NULL) {
+ debug("Error: allocating memory\n");
+ return -1;
+ }
+
+ if (disk_read(cursect,
+ (mydata->fatsize == 32) ?
+ (mydata->clust_size) :
+ PREFETCH_BLOCKS, do_fat_read_at_block) < 0) {
+ debug("Error: reading rootdir block\n");
+ goto exit;
+ }
+ dentptr = (dir_entry *) do_fat_read_at_block;
+
+ name_len = strlen(filename);
+ if (name_len >= VFAT_MAXLEN_BYTES)
+ name_len = VFAT_MAXLEN_BYTES - 1;
+
+ memcpy(l_filename, filename, name_len);
+ l_filename[name_len] = 0; /* terminate the string */
+ downcase(l_filename);
+
+ startsect = mydata->rootdir_sect;
+ retdent = find_directory_entry(mydata, startsect,
+ l_filename, dentptr, 0);
+ if (retdent) {
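+		/*
+		 * A file with this name already exists: reuse its directory
+		 * entry, free the old cluster chain and write the new
+		 * contents in its place.
+		 */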
+ /* Update file size and start_cluster in a directory entry */
+ retdent->size = cpu_to_le32(size);
+ start_cluster = FAT2CPU16(retdent->start);
+ if (mydata->fatsize == 32)
+ start_cluster |=
+ (FAT2CPU16(retdent->starthi) << 16);
+
+ ret = check_overflow(mydata, start_cluster, size);
+ if (ret) {
+ printf("Error: %ld overflow\n", size);
+ goto exit;
+ }
+
+ ret = clear_fatent(mydata, start_cluster);
+ if (ret) {
+ printf("Error: clearing FAT entries\n");
+ goto exit;
+ }
+
+ ret = set_contents(mydata, retdent, buffer, size);
+ if (ret < 0) {
+ printf("Error: writing contents\n");
+ goto exit;
+ }
+ write_size = ret;
+ debug("attempt to write 0x%x bytes\n", write_size);
+
+ /* Flush fat buffer */
+ ret = flush_fat_buffer(mydata);
+ if (ret) {
+			printf("Error: flushing FAT buffer\n");
+ goto exit;
+ }
+
+ /* Write directory table to device */
+ ret = set_cluster(mydata, dir_curclust,
+ get_dentfromdir_block,
+ mydata->clust_size * mydata->sect_size);
+ if (ret) {
+ printf("Error: writing directory entry\n");
+ goto exit;
+ }
+ } else {
+ /* Set short name to set alias checksum field in dir_slot */
+ set_name(empty_dentptr, filename);
+ fill_dir_slot(mydata, &empty_dentptr, filename);
+
+ ret = start_cluster = find_empty_cluster(mydata);
+ if (ret < 0) {
+ printf("Error: finding empty cluster\n");
+ goto exit;
+ }
+
+ ret = check_overflow(mydata, start_cluster, size);
+ if (ret) {
+ printf("Error: %ld overflow\n", size);
+ goto exit;
+ }
+
+		/* Set the archive attribute for a regular file */
+ fill_dentry(mydata, empty_dentptr, filename,
+ start_cluster, size, 0x20);
+
+ ret = set_contents(mydata, empty_dentptr, buffer, size);
+ if (ret < 0) {
+ printf("Error: writing contents\n");
+ goto exit;
+ }
+ write_size = ret;
+ debug("attempt to write 0x%x bytes\n", write_size);
+
+ /* Flush fat buffer */
+ ret = flush_fat_buffer(mydata);
+ if (ret) {
+			printf("Error: flushing FAT buffer\n");
+ goto exit;
+ }
+
+ /* Write directory table to device */
+ ret = set_cluster(mydata, dir_curclust,
+ get_dentfromdir_block,
+ mydata->clust_size * mydata->sect_size);
+ if (ret) {
+ printf("Error: writing directory entry\n");
+ goto exit;
+ }
+ }
+
+exit:
+ free(mydata->fatbuf);
+ return ret < 0 ? ret : write_size;
+}
+
+int file_fat_write(const char *filename, void *buffer, unsigned long maxsize)
+{
+ printf("writing %s\n", filename);
+ return do_fat_write(filename, buffer, maxsize);
+}
diff --git a/qemu/roms/u-boot/fs/fat/file.c b/qemu/roms/u-boot/fs/fat/file.c
new file mode 100644
index 000000000..d910c46dd
--- /dev/null
+++ b/qemu/roms/u-boot/fs/fat/file.c
@@ -0,0 +1,184 @@
+/*
+ * file.c
+ *
+ * Mini "VFS" by Marcus Sundberg
+ *
+ * 2002-07-28 - rjones@nexus-tech.net - ported to ppcboot v1.1.6
+ * 2003-03-10 - kharris@nexus-tech.net - ported to uboot
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <config.h>
+#include <malloc.h>
+#include <fat.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+
+/* Supported filesystems */
+static const struct filesystem filesystems[] = {
+ { file_fat_detectfs, file_fat_ls, file_fat_read, "FAT" },
+};
+#define NUM_FILESYS (sizeof(filesystems)/sizeof(struct filesystem))
+
+/* The filesystem which was last detected */
+static int current_filesystem = FSTYPE_NONE;
+
+/* The current working directory */
+#define CWD_LEN 511
+char file_cwd[CWD_LEN+1] = "/";
+
+const char *
+file_getfsname(int idx)
+{
+ if (idx < 0 || idx >= NUM_FILESYS)
+ return NULL;
+
+ return filesystems[idx].name;
+}
+
+static void
+pathcpy(char *dest, const char *src)
+{
+ char *origdest = dest;
+
+ do {
+ if (dest-file_cwd >= CWD_LEN) {
+ *dest = '\0';
+ return;
+ }
+ *(dest) = *(src);
+ if (*src == '\0') {
+ if (dest-- != origdest && ISDIRDELIM(*dest)) {
+ *dest = '\0';
+ }
+ return;
+ }
+ ++dest;
+
+ if (ISDIRDELIM(*src))
+ while (ISDIRDELIM(*src)) src++;
+ else
+ src++;
+ } while (1);
+}
+
+int
+file_cd(const char *path)
+{
+ if (ISDIRDELIM(*path)) {
+ while (ISDIRDELIM(*path)) path++;
+ strncpy(file_cwd+1, path, CWD_LEN-1);
+ } else {
+ const char *origpath = path;
+ char *tmpstr = file_cwd;
+ int back = 0;
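+		/*
+		 * 'back' counts '..' components, i.e. how many path elements
+		 * have to be stripped from the current working directory.
+		 */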
+
+ while (*tmpstr != '\0') tmpstr++;
+ do {
+ tmpstr--;
+ } while (ISDIRDELIM(*tmpstr));
+
+ while (*path == '.') {
+ path++;
+ while (*path == '.') {
+ path++;
+ back++;
+ }
+ if (*path != '\0' && !ISDIRDELIM(*path)) {
+ path = origpath;
+ back = 0;
+ break;
+ }
+ while (ISDIRDELIM(*path)) path++;
+ origpath = path;
+ }
+
+ while (back--) {
+ /* Strip off path component */
+ while (!ISDIRDELIM(*tmpstr)) {
+ tmpstr--;
+ }
+ if (tmpstr == file_cwd) {
+ /* Incremented again right after the loop. */
+ tmpstr--;
+ break;
+ }
+ /* Skip delimiters */
+ while (ISDIRDELIM(*tmpstr)) tmpstr--;
+ }
+ tmpstr++;
+ if (*path == '\0') {
+ if (tmpstr == file_cwd) {
+ *tmpstr = '/';
+ tmpstr++;
+ }
+ *tmpstr = '\0';
+ return 0;
+ }
+ *tmpstr = '/';
+ pathcpy(tmpstr+1, path);
+ }
+
+ return 0;
+}
+
+int
+file_detectfs(void)
+{
+ int i;
+
+ current_filesystem = FSTYPE_NONE;
+
+ for (i = 0; i < NUM_FILESYS; i++) {
+ if (filesystems[i].detect() == 0) {
+ strcpy(file_cwd, "/");
+ current_filesystem = i;
+ break;
+ }
+ }
+
+ return current_filesystem;
+}
+
+int
+file_ls(const char *dir)
+{
+ char fullpath[1024];
+ const char *arg;
+
+ if (current_filesystem == FSTYPE_NONE) {
+ printf("Can't list files without a filesystem!\n");
+ return -1;
+ }
+
+ if (ISDIRDELIM(*dir)) {
+ arg = dir;
+ } else {
+ sprintf(fullpath, "%s/%s", file_cwd, dir);
+ arg = fullpath;
+ }
+ return filesystems[current_filesystem].ls(arg);
+}
+
+long
+file_read(const char *filename, void *buffer, unsigned long maxsize)
+{
+ char fullpath[1024];
+ const char *arg;
+
+ if (current_filesystem == FSTYPE_NONE) {
+ printf("Can't load file without a filesystem!\n");
+ return -1;
+ }
+
+ if (ISDIRDELIM(*filename)) {
+ arg = filename;
+ } else {
+ sprintf(fullpath, "%s/%s", file_cwd, filename);
+ arg = fullpath;
+ }
+
+ return filesystems[current_filesystem].read(arg, buffer, maxsize);
+}
diff --git a/qemu/roms/u-boot/fs/fs.c b/qemu/roms/u-boot/fs/fs.c
new file mode 100644
index 000000000..79d432d58
--- /dev/null
+++ b/qemu/roms/u-boot/fs/fs.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+#include <common.h>
+#include <part.h>
+#include <ext4fs.h>
+#include <fat.h>
+#include <fs.h>
+#include <sandboxfs.h>
+#include <asm/io.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static block_dev_desc_t *fs_dev_desc;
+static disk_partition_t fs_partition;
+static int fs_type = FS_TYPE_ANY;
+
+static inline int fs_probe_unsupported(block_dev_desc_t *fs_dev_desc,
+ disk_partition_t *fs_partition)
+{
+ printf("** Unrecognized filesystem type **\n");
+ return -1;
+}
+
+static inline int fs_ls_unsupported(const char *dirname)
+{
+ return -1;
+}
+
+static inline int fs_exists_unsupported(const char *filename)
+{
+ return 0;
+}
+
+static inline int fs_read_unsupported(const char *filename, void *buf,
+ int offset, int len)
+{
+ return -1;
+}
+
+static inline int fs_write_unsupported(const char *filename, void *buf,
+ int offset, int len)
+{
+ return -1;
+}
+
+static inline void fs_close_unsupported(void)
+{
+}
+
+struct fstype_info {
+ int fstype;
+ /*
+ * Is it legal to pass NULL as .probe()'s fs_dev_desc parameter? This
+ * should be false in most cases. For "virtual" filesystems which
+ * aren't based on a U-Boot block device (e.g. sandbox), this can be
+	 * set to true. This should also be true for the dummy entry at the end
+ * of fstypes[], since that is essentially a "virtual" (non-existent)
+ * filesystem.
+ */
+ bool null_dev_desc_ok;
+ int (*probe)(block_dev_desc_t *fs_dev_desc,
+ disk_partition_t *fs_partition);
+ int (*ls)(const char *dirname);
+ int (*exists)(const char *filename);
+ int (*read)(const char *filename, void *buf, int offset, int len);
+ int (*write)(const char *filename, void *buf, int offset, int len);
+ void (*close)(void);
+};
+
+static struct fstype_info fstypes[] = {
+#ifdef CONFIG_FS_FAT
+ {
+ .fstype = FS_TYPE_FAT,
+ .null_dev_desc_ok = false,
+ .probe = fat_set_blk_dev,
+ .close = fat_close,
+ .ls = file_fat_ls,
+ .exists = fat_exists,
+ .read = fat_read_file,
+ .write = fs_write_unsupported,
+ },
+#endif
+#ifdef CONFIG_FS_EXT4
+ {
+ .fstype = FS_TYPE_EXT,
+ .null_dev_desc_ok = false,
+ .probe = ext4fs_probe,
+ .close = ext4fs_close,
+ .ls = ext4fs_ls,
+ .exists = ext4fs_exists,
+ .read = ext4_read_file,
+ .write = fs_write_unsupported,
+ },
+#endif
+#ifdef CONFIG_SANDBOX
+ {
+ .fstype = FS_TYPE_SANDBOX,
+ .null_dev_desc_ok = true,
+ .probe = sandbox_fs_set_blk_dev,
+ .close = sandbox_fs_close,
+ .ls = sandbox_fs_ls,
+ .exists = sandbox_fs_exists,
+ .read = fs_read_sandbox,
+ .write = fs_write_sandbox,
+ },
+#endif
+ {
+ .fstype = FS_TYPE_ANY,
+ .null_dev_desc_ok = true,
+ .probe = fs_probe_unsupported,
+ .close = fs_close_unsupported,
+ .ls = fs_ls_unsupported,
+ .exists = fs_exists_unsupported,
+ .read = fs_read_unsupported,
+ .write = fs_write_unsupported,
+ },
+};
+
+static struct fstype_info *fs_get_info(int fstype)
+{
+ struct fstype_info *info;
+ int i;
+
+ for (i = 0, info = fstypes; i < ARRAY_SIZE(fstypes) - 1; i++, info++) {
+ if (fstype == info->fstype)
+ return info;
+ }
+
+ /* Return the 'unsupported' sentinel */
+ return info;
+}
+
+int fs_set_blk_dev(const char *ifname, const char *dev_part_str, int fstype)
+{
+ struct fstype_info *info;
+ int part, i;
+#ifdef CONFIG_NEEDS_MANUAL_RELOC
+ static int relocated;
+
+ if (!relocated) {
+ for (i = 0, info = fstypes; i < ARRAY_SIZE(fstypes);
+ i++, info++) {
+ info->probe += gd->reloc_off;
+ info->close += gd->reloc_off;
+ info->ls += gd->reloc_off;
+ info->read += gd->reloc_off;
+ info->write += gd->reloc_off;
+ }
+ relocated = 1;
+ }
+#endif
+
+ part = get_device_and_partition(ifname, dev_part_str, &fs_dev_desc,
+ &fs_partition, 1);
+ if (part < 0)
+ return -1;
+
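+	/*
+	 * Try each supported filesystem (or only the requested one) in turn
+	 * until a probe succeeds; the matching type is kept in fs_type.
+	 */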
+ for (i = 0, info = fstypes; i < ARRAY_SIZE(fstypes); i++, info++) {
+ if (fstype != FS_TYPE_ANY && info->fstype != FS_TYPE_ANY &&
+ fstype != info->fstype)
+ continue;
+
+ if (!fs_dev_desc && !info->null_dev_desc_ok)
+ continue;
+
+ if (!info->probe(fs_dev_desc, &fs_partition)) {
+ fs_type = info->fstype;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void fs_close(void)
+{
+ struct fstype_info *info = fs_get_info(fs_type);
+
+ info->close();
+
+ fs_type = FS_TYPE_ANY;
+}
+
+int fs_ls(const char *dirname)
+{
+ int ret;
+
+ struct fstype_info *info = fs_get_info(fs_type);
+
+ ret = info->ls(dirname);
+
+ fs_type = FS_TYPE_ANY;
+ fs_close();
+
+ return ret;
+}
+
+int fs_exists(const char *filename)
+{
+ int ret;
+
+ struct fstype_info *info = fs_get_info(fs_type);
+
+ ret = info->exists(filename);
+
+ fs_close();
+
+ return ret;
+}
+
+int fs_read(const char *filename, ulong addr, int offset, int len)
+{
+ struct fstype_info *info = fs_get_info(fs_type);
+ void *buf;
+ int ret;
+
+ /*
+ * We don't actually know how many bytes are being read, since len==0
+ * means read the whole file.
+ */
+ buf = map_sysmem(addr, len);
+ ret = info->read(filename, buf, offset, len);
+ unmap_sysmem(buf);
+
+ /* If we requested a specific number of bytes, check we got it */
+ if (ret >= 0 && len && ret != len) {
+ printf("** Unable to read file %s **\n", filename);
+ ret = -1;
+ }
+ fs_close();
+
+ return ret;
+}
+
+int fs_write(const char *filename, ulong addr, int offset, int len)
+{
+ struct fstype_info *info = fs_get_info(fs_type);
+ void *buf;
+ int ret;
+
+ buf = map_sysmem(addr, len);
+ ret = info->write(filename, buf, offset, len);
+ unmap_sysmem(buf);
+
+ if (ret >= 0 && ret != len) {
+ printf("** Unable to write file %s **\n", filename);
+ ret = -1;
+ }
+ fs_close();
+
+ return ret;
+}
+
+int do_load(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[],
+ int fstype)
+{
+ unsigned long addr;
+ const char *addr_str;
+ const char *filename;
+ unsigned long bytes;
+ unsigned long pos;
+ int len_read;
+ unsigned long time;
+
+ if (argc < 2)
+ return CMD_RET_USAGE;
+ if (argc > 7)
+ return CMD_RET_USAGE;
+
+ if (fs_set_blk_dev(argv[1], (argc >= 3) ? argv[2] : NULL, fstype))
+ return 1;
+
+ if (argc >= 4) {
+ addr = simple_strtoul(argv[3], NULL, 16);
+ } else {
+ addr_str = getenv("loadaddr");
+ if (addr_str != NULL)
+ addr = simple_strtoul(addr_str, NULL, 16);
+ else
+ addr = CONFIG_SYS_LOAD_ADDR;
+ }
+ if (argc >= 5) {
+ filename = argv[4];
+ } else {
+ filename = getenv("bootfile");
+ if (!filename) {
+ puts("** No boot file defined **\n");
+ return 1;
+ }
+ }
+ if (argc >= 6)
+ bytes = simple_strtoul(argv[5], NULL, 16);
+ else
+ bytes = 0;
+ if (argc >= 7)
+ pos = simple_strtoul(argv[6], NULL, 16);
+ else
+ pos = 0;
+
+ time = get_timer(0);
+ len_read = fs_read(filename, addr, pos, bytes);
+ time = get_timer(time);
+ if (len_read <= 0)
+ return 1;
+
+ printf("%d bytes read in %lu ms", len_read, time);
+ if (time > 0) {
+ puts(" (");
+ print_size(len_read / time * 1000, "/s");
+ puts(")");
+ }
+ puts("\n");
+
+ setenv_hex("filesize", len_read);
+
+ return 0;
+}
+
+int do_ls(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[],
+ int fstype)
+{
+ if (argc < 2)
+ return CMD_RET_USAGE;
+ if (argc > 4)
+ return CMD_RET_USAGE;
+
+ if (fs_set_blk_dev(argv[1], (argc >= 3) ? argv[2] : NULL, fstype))
+ return 1;
+
+ if (fs_ls(argc >= 4 ? argv[3] : "/"))
+ return 1;
+
+ return 0;
+}
+
+int file_exists(const char *dev_type, const char *dev_part, const char *file,
+ int fstype)
+{
+ if (fs_set_blk_dev(dev_type, dev_part, fstype))
+ return 0;
+
+ return fs_exists(file);
+}
+
+int do_save(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[],
+ int fstype)
+{
+ unsigned long addr;
+ const char *filename;
+ unsigned long bytes;
+ unsigned long pos;
+ int len;
+ unsigned long time;
+
+ if (argc < 6 || argc > 7)
+ return CMD_RET_USAGE;
+
+ if (fs_set_blk_dev(argv[1], argv[2], fstype))
+ return 1;
+
+ filename = argv[3];
+ addr = simple_strtoul(argv[4], NULL, 16);
+ bytes = simple_strtoul(argv[5], NULL, 16);
+ if (argc >= 7)
+ pos = simple_strtoul(argv[6], NULL, 16);
+ else
+ pos = 0;
+
+ time = get_timer(0);
+ len = fs_write(filename, addr, pos, bytes);
+ time = get_timer(time);
+ if (len <= 0)
+ return 1;
+
+ printf("%d bytes written in %lu ms", len, time);
+ if (time > 0) {
+ puts(" (");
+ print_size(len / time * 1000, "/s");
+ puts(")");
+ }
+ puts("\n");
+
+ return 0;
+}
diff --git a/qemu/roms/u-boot/fs/jffs2/LICENCE b/qemu/roms/u-boot/fs/jffs2/LICENCE
new file mode 100644
index 000000000..562885908
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/LICENCE
@@ -0,0 +1,30 @@
+The files in this directory and elsewhere which refer to this LICENCE
+file are part of JFFS2, the Journalling Flash File System v2.
+
+ Copyright © 2001-2007 Red Hat, Inc. and others
+
+JFFS2 is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 or (at your option) any later
+version.
+
+JFFS2 is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License along
+with JFFS2; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+As a special exception, if other files instantiate templates or use
+macros or inline functions from these files, or you compile these
+files and link them with other works to produce a work based on these
+files, these files do not by themselves cause the resulting work to be
+covered by the GNU General Public License. However the source code for
+these files must still be made available in accordance with section (3)
+of the GNU General Public License.
+
+This exception does not invalidate any other reasons why a work based on
+this file might be covered by the GNU General Public License.
+
diff --git a/qemu/roms/u-boot/fs/jffs2/Makefile b/qemu/roms/u-boot/fs/jffs2/Makefile
new file mode 100644
index 000000000..4cb0600cf
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/Makefile
@@ -0,0 +1,13 @@
+#
+# (C) Copyright 2000-2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-$(CONFIG_JFFS2_LZO) += compr_lzo.o
+obj-y += compr_rtime.o
+obj-y += compr_rubin.o
+obj-y += compr_zlib.o
+obj-y += jffs2_1pass.o
+obj-y += mini_inflate.o
diff --git a/qemu/roms/u-boot/fs/jffs2/compr_lzo.c b/qemu/roms/u-boot/fs/jffs2/compr_lzo.c
new file mode 100644
index 000000000..e648ec4fb
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/compr_lzo.c
@@ -0,0 +1,401 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Patrik Kluba,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in the
+ * jffs2 directory.
+ *
+ * $Id: compr_lzo.c,v 1.3 2004/06/23 16:34:39 havasi Exp $
+ *
+ */
+
+/*
+ LZO1X-1 (and -999) compression module for jffs2
+ based on the original LZO sources
+*/
+
+/* -*- Mode: C; indent-tabs-mode: t; c-basic-offset: 4; tab-width: 4 -*- */
+
+/*
+ Original copyright notice follows:
+
+ lzo1x_9x.c -- implementation of the LZO1X-999 compression algorithm
+ lzo_ptr.h -- low-level pointer constructs
+ lzo_swd.ch -- sliding window dictionary
+ lzoconf.h -- configuration for the LZO real-time data compression library
+ lzo_mchw.ch -- matching functions using a window
+ minilzo.c -- mini subset of the LZO real-time data compression library
+ config1x.h -- configuration for the LZO1X algorithm
+ lzo1x.h -- public interface of the LZO1X compression algorithm
+
+ These files are part of the LZO real-time data compression library.
+
+ Copyright (C) 1996-2002 Markus Franz Xaver Johannes Oberhumer
+ All Rights Reserved.
+
+ The LZO library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of
+ the License, or (at your option) any later version.
+
+ The LZO library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the LZO library; see the file COPYING.
+ If not, write to the Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+ Markus F.X.J. Oberhumer
+ <markus@oberhumer.com>
+*/
+
+/*
+
+ 2004-02-16 pajko <pajko(AT)halom(DOT)u-szeged(DOT)hu>
+ Initial release
+ -removed all 16 bit code
+ -all sensitive data will be on 4 byte boundary
+ -removed check parts for library use
+ -removed all but LZO1X-* compression
+
+*/
+
+
+#include <config.h>
+#include <linux/stddef.h>
+#include <jffs2/jffs2.h>
+#include <jffs2/compr_rubin.h>
+
+/* Integral types that have *exactly* the same number of bits as a lzo_voidp */
+typedef unsigned long lzo_ptr_t;
+typedef long lzo_sptr_t;
+
+/* data type definitions */
+#define U32 unsigned long
+#define S32 signed long
+#define I32 long
+#define U16 unsigned short
+#define S16 signed short
+#define I16 short
+#define U8 unsigned char
+#define S8 signed char
+#define I8 char
+
+#define M1_MAX_OFFSET 0x0400
+#define M2_MAX_OFFSET 0x0800
+#define M3_MAX_OFFSET 0x4000
+#define M4_MAX_OFFSET 0xbfff
+
+#define __COPY4(dst,src) * (lzo_uint32p)(dst) = * (const lzo_uint32p)(src)
+#define COPY4(dst,src) __COPY4((lzo_ptr_t)(dst),(lzo_ptr_t)(src))
+
+#define TEST_IP (ip < ip_end)
+#define TEST_OP (op <= op_end)
+
+#define NEED_IP(x) \
+ if ((lzo_uint)(ip_end - ip) < (lzo_uint)(x)) goto input_overrun
+#define NEED_OP(x) \
+ if ((lzo_uint)(op_end - op) < (lzo_uint)(x)) goto output_overrun
+#define TEST_LOOKBEHIND(m_pos,out) if (m_pos < out) goto lookbehind_overrun
+
+typedef U32 lzo_uint32;
+typedef I32 lzo_int32;
+typedef U32 lzo_uint;
+typedef I32 lzo_int;
+typedef int lzo_bool;
+
+#define lzo_byte U8
+#define lzo_bytep U8 *
+#define lzo_charp char *
+#define lzo_voidp void *
+#define lzo_shortp short *
+#define lzo_ushortp unsigned short *
+#define lzo_uint32p lzo_uint32 *
+#define lzo_int32p lzo_int32 *
+#define lzo_uintp lzo_uint *
+#define lzo_intp lzo_int *
+#define lzo_voidpp lzo_voidp *
+#define lzo_bytepp lzo_bytep *
+#define lzo_sizeof_dict_t sizeof(lzo_bytep)
+
+#define LZO_E_OK 0
+#define LZO_E_ERROR (-1)
+#define LZO_E_OUT_OF_MEMORY (-2) /* not used right now */
+#define LZO_E_NOT_COMPRESSIBLE (-3) /* not used right now */
+#define LZO_E_INPUT_OVERRUN (-4)
+#define LZO_E_OUTPUT_OVERRUN (-5)
+#define LZO_E_LOOKBEHIND_OVERRUN (-6)
+#define LZO_E_EOF_NOT_FOUND (-7)
+#define LZO_E_INPUT_NOT_CONSUMED (-8)
+
+#define PTR(a) ((lzo_ptr_t) (a))
+#define PTR_LINEAR(a) PTR(a)
+#define PTR_ALIGNED_4(a) ((PTR_LINEAR(a) & 3) == 0)
+#define PTR_ALIGNED_8(a) ((PTR_LINEAR(a) & 7) == 0)
+#define PTR_ALIGNED2_4(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 3) == 0)
+#define PTR_ALIGNED2_8(a,b) (((PTR_LINEAR(a) | PTR_LINEAR(b)) & 7) == 0)
+#define PTR_LT(a,b) (PTR(a) < PTR(b))
+#define PTR_GE(a,b) (PTR(a) >= PTR(b))
+#define PTR_DIFF(a,b) ((lzo_ptrdiff_t) (PTR(a) - PTR(b)))
+#define pd(a,b) ((lzo_uint) ((a)-(b)))
+
+typedef ptrdiff_t lzo_ptrdiff_t;
+
+static int
+lzo1x_decompress (const lzo_byte * in, lzo_uint in_len,
+ lzo_byte * out, lzo_uintp out_len, lzo_voidp wrkmem)
+{
+ register lzo_byte *op;
+ register const lzo_byte *ip;
+ register lzo_uint t;
+
+ register const lzo_byte *m_pos;
+
+ const lzo_byte *const ip_end = in + in_len;
+ lzo_byte *const op_end = out + *out_len;
+
+ *out_len = 0;
+
+ op = out;
+ ip = in;
+
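+  /*
+   * A first byte greater than 17 encodes an initial run of
+   * (byte - 17) literal bytes before the regular instruction stream.
+   */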
+ if (*ip > 17)
+ {
+ t = *ip++ - 17;
+ if (t < 4)
+ goto match_next;
+ NEED_OP (t);
+ NEED_IP (t + 1);
+ do
+ *op++ = *ip++;
+ while (--t > 0);
+ goto first_literal_run;
+ }
+
+ while (TEST_IP && TEST_OP)
+ {
+ t = *ip++;
+ if (t >= 16)
+ goto match;
+ if (t == 0)
+ {
+ NEED_IP (1);
+ while (*ip == 0)
+ {
+ t += 255;
+ ip++;
+ NEED_IP (1);
+ }
+ t += 15 + *ip++;
+ }
+ NEED_OP (t + 3);
+ NEED_IP (t + 4);
+ if (PTR_ALIGNED2_4 (op, ip))
+ {
+ COPY4 (op, ip);
+
+ op += 4;
+ ip += 4;
+ if (--t > 0)
+ {
+ if (t >= 4)
+ {
+ do
+ {
+ COPY4 (op, ip);
+ op += 4;
+ ip += 4;
+ t -= 4;
+ }
+ while (t >= 4);
+ if (t > 0)
+ do
+ *op++ = *ip++;
+ while (--t > 0);
+ }
+ else
+ do
+ *op++ = *ip++;
+ while (--t > 0);
+ }
+ }
+ else
+ {
+ *op++ = *ip++;
+ *op++ = *ip++;
+ *op++ = *ip++;
+ do
+ *op++ = *ip++;
+ while (--t > 0);
+ }
+ first_literal_run:
+
+ t = *ip++;
+ if (t >= 16)
+ goto match;
+
+ m_pos = op - (1 + M2_MAX_OFFSET);
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LOOKBEHIND (m_pos, out);
+ NEED_OP (3);
+ *op++ = *m_pos++;
+ *op++ = *m_pos++;
+ *op++ = *m_pos;
+
+ goto match_done;
+
+ while (TEST_IP && TEST_OP)
+ {
+ match:
+ if (t >= 64)
+ {
+ m_pos = op - 1;
+ m_pos -= (t >> 2) & 7;
+ m_pos -= *ip++ << 3;
+ t = (t >> 5) - 1;
+ TEST_LOOKBEHIND (m_pos, out);
+ NEED_OP (t + 3 - 1);
+ goto copy_match;
+
+ }
+ else if (t >= 32)
+ {
+ t &= 31;
+ if (t == 0)
+ {
+ NEED_IP (1);
+ while (*ip == 0)
+ {
+ t += 255;
+ ip++;
+ NEED_IP (1);
+ }
+ t += 31 + *ip++;
+ }
+
+ m_pos = op - 1;
+ m_pos -= (ip[0] >> 2) + (ip[1] << 6);
+
+ ip += 2;
+ }
+ else if (t >= 16)
+ {
+ m_pos = op;
+ m_pos -= (t & 8) << 11;
+
+ t &= 7;
+ if (t == 0)
+ {
+ NEED_IP (1);
+ while (*ip == 0)
+ {
+ t += 255;
+ ip++;
+ NEED_IP (1);
+ }
+ t += 7 + *ip++;
+ }
+
+ m_pos -= (ip[0] >> 2) + (ip[1] << 6);
+
+ ip += 2;
+ if (m_pos == op)
+ goto eof_found;
+ m_pos -= 0x4000;
+ }
+ else
+ {
+
+ m_pos = op - 1;
+ m_pos -= t >> 2;
+ m_pos -= *ip++ << 2;
+ TEST_LOOKBEHIND (m_pos, out);
+ NEED_OP (2);
+ *op++ = *m_pos++;
+ *op++ = *m_pos;
+
+ goto match_done;
+ }
+
+ TEST_LOOKBEHIND (m_pos, out);
+ NEED_OP (t + 3 - 1);
+ if (t >= 2 * 4 - (3 - 1)
+ && PTR_ALIGNED2_4 (op, m_pos))
+ {
+ COPY4 (op, m_pos);
+ op += 4;
+ m_pos += 4;
+ t -= 4 - (3 - 1);
+ do
+ {
+ COPY4 (op, m_pos);
+ op += 4;
+ m_pos += 4;
+ t -= 4;
+ }
+ while (t >= 4);
+ if (t > 0)
+ do
+ *op++ = *m_pos++;
+ while (--t > 0);
+ }
+ else
+
+ {
+ copy_match:
+ *op++ = *m_pos++;
+ *op++ = *m_pos++;
+ do
+ *op++ = *m_pos++;
+ while (--t > 0);
+ }
+
+ match_done:
+ t = ip[-2] & 3;
+
+ if (t == 0)
+ break;
+
+ match_next:
+ NEED_OP (t);
+ NEED_IP (t + 1);
+ do
+ *op++ = *ip++;
+ while (--t > 0);
+ t = *ip++;
+ }
+ }
+ *out_len = op - out;
+ return LZO_E_EOF_NOT_FOUND;
+
+ eof_found:
+ *out_len = op - out;
+ return (ip == ip_end ? LZO_E_OK :
+ (ip <
+ ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN));
+
+ input_overrun:
+ *out_len = op - out;
+ return LZO_E_INPUT_OVERRUN;
+
+ output_overrun:
+ *out_len = op - out;
+ return LZO_E_OUTPUT_OVERRUN;
+
+ lookbehind_overrun:
+ *out_len = op - out;
+ return LZO_E_LOOKBEHIND_OVERRUN;
+}
+
+int lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ u32 srclen, u32 destlen)
+{
+ lzo_uint outlen = destlen;
+ return lzo1x_decompress (data_in, srclen, cpage_out, &outlen, NULL);
+}
diff --git a/qemu/roms/u-boot/fs/jffs2/compr_rtime.c b/qemu/roms/u-boot/fs/jffs2/compr_rtime.c
new file mode 100644
index 000000000..89b9f2f13
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/compr_rtime.c
@@ -0,0 +1,87 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001 Red Hat, Inc.
+ *
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * The original JFFS, from which the design for JFFS2 was derived,
+ * was designed and implemented by Axis Communications AB.
+ *
+ * The contents of this file are subject to the Red Hat eCos Public
+ * License Version 1.1 (the "Licence"); you may not use this file
+ * except in compliance with the Licence. You may obtain a copy of
+ * the Licence at http://www.redhat.com/
+ *
+ * Software distributed under the Licence is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
+ * See the Licence for the specific language governing rights and
+ * limitations under the Licence.
+ *
+ * The Original Code is JFFS2 - Journalling Flash File System, version 2
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the RHEPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the RHEPL or the GPL.
+ *
+ * $Id: compr_rtime.c,v 1.2 2002/01/24 22:58:42 rfeany Exp $
+ *
+ *
+ * Very simple lz77-ish encoder.
+ *
+ * Theory of operation: Both encoder and decoder keep a list of "last
+ * occurrences" for every possible source value; after sending the
+ * first source byte, the second byte indicates the "run" length of
+ * matches
+ *
+ * The algorithm is intended to only send "whole bytes", no bit-messing.
+ *
+ */
+
+#include <config.h>
+#include <jffs2/jffs2.h>
+
+void rtime_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ u32 srclen, u32 destlen)
+{
+ int positions[256];
+ int outpos;
+ int pos;
+ int i;
+
+ outpos = pos = 0;
+
+ for (i = 0; i < 256; positions[i++] = 0);
+
+ while (outpos<destlen) {
+ unsigned char value;
+ int backoffs;
+ int repeat;
+
+ value = data_in[pos++];
+ cpage_out[outpos++] = value; /* first the verbatim copied byte */
+ repeat = data_in[pos++];
+ backoffs = positions[value];
+
+ positions[value]=outpos;
+ if (repeat) {
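+			/*
+			 * When the back reference overlaps the current output
+			 * position, copy byte by byte so that freshly written
+			 * bytes feed the remainder of the run.
+			 */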
+ if (backoffs + repeat >= outpos) {
+ while(repeat) {
+ cpage_out[outpos++] = cpage_out[backoffs++];
+ repeat--;
+ }
+ } else {
+ for (i = 0; i < repeat; i++)
+ *(cpage_out + outpos + i) = *(cpage_out + backoffs + i);
+ outpos+=repeat;
+ }
+ }
+ }
+}
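For reference, here is a sketch of an encoder producing the (verbatim byte, run length) pairs that rtime_decompress() consumes, mirroring the same "last occurrence" table. It only illustrates the on-flash format and is not the rtime_compress() used by the kernel; it assumes the output buffer can hold up to 2 * srclen bytes.

static int rtime_compress_sketch(const unsigned char *in, unsigned char *out,
				 int srclen)
{
	int positions[256] = { 0 };	/* last position seen, per byte value */
	int pos = 0, outpos = 0;

	while (pos < srclen) {
		unsigned char value = in[pos];
		int backpos = positions[value];
		int repeat = 0;

		out[outpos++] = value;		/* the verbatim copied byte */
		positions[value] = pos + 1;	/* the decoder stores the offset just after it */
		pos++;

		/* count how many bytes repeat the data after the last occurrence */
		while (repeat < 255 && pos + repeat < srclen &&
		       in[pos + repeat] == in[backpos + repeat])
			repeat++;

		out[outpos++] = (unsigned char)repeat;	/* the run length */
		pos += repeat;
	}
	return outpos;		/* compressed size, at most 2 * srclen */
}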
diff --git a/qemu/roms/u-boot/fs/jffs2/compr_rubin.c b/qemu/roms/u-boot/fs/jffs2/compr_rubin.c
new file mode 100644
index 000000000..9ff221773
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/compr_rubin.c
@@ -0,0 +1,122 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001 Red Hat, Inc.
+ *
+ * Created by Arjan van de Ven <arjanv@redhat.com>
+ *
+ * Heavily modified by Russ Dill <Russ.Dill@asu.edu> in an attempt at
+ * a little more speed.
+ *
+ * The original JFFS, from which the design for JFFS2 was derived,
+ * was designed and implemented by Axis Communications AB.
+ *
+ * The contents of this file are subject to the Red Hat eCos Public
+ * License Version 1.1 (the "Licence"); you may not use this file
+ * except in compliance with the Licence. You may obtain a copy of
+ * the Licence at http://www.redhat.com/
+ *
+ * Software distributed under the Licence is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
+ * See the Licence for the specific language governing rights and
+ * limitations under the Licence.
+ *
+ * The Original Code is JFFS2 - Journalling Flash File System, version 2
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the RHEPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the RHEPL or the GPL.
+ *
+ * $Id: compr_rubin.c,v 1.2 2002/01/24 22:58:42 rfeany Exp $
+ *
+ */
+
+#include <config.h>
+#include <jffs2/jffs2.h>
+#include <jffs2/compr_rubin.h>
+
+
+void rubin_do_decompress(unsigned char *bits, unsigned char *in,
+ unsigned char *page_out, __u32 destlen)
+{
+ register char *curr = (char *)page_out;
+ char *end = (char *)(page_out + destlen);
+ register unsigned long temp;
+ register unsigned long result;
+ register unsigned long p;
+ register unsigned long q;
+ register unsigned long rec_q;
+ register unsigned long bit;
+ register long i0;
+ unsigned long i;
+
+ /* init_pushpull */
+ temp = *(u32 *) in;
+ bit = 16;
+
+ /* init_rubin */
+ q = 0;
+ p = (long) (2 * UPPER_BIT_RUBIN);
+
+ /* init_decode */
+ rec_q = (in[0] << 8) | in[1];
+
+ while (curr < end) {
+ /* in byte */
+
+ result = 0;
+ for (i = 0; i < 8; i++) {
+ /* decode */
+
+ while ((q & UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN)) {
+ q &= ~UPPER_BIT_RUBIN;
+ q <<= 1;
+ p <<= 1;
+ rec_q &= ~UPPER_BIT_RUBIN;
+ rec_q <<= 1;
+ rec_q |= (temp >> (bit++ ^ 7)) & 1;
+ if (bit > 31) {
+ u32 *p = (u32 *)in;
+ bit = 0;
+ temp = *(++p);
+ in = (unsigned char *)p;
+ }
+ }
+ i0 = (bits[i] * p) >> 8;
+
+ if (i0 <= 0) i0 = 1;
+ /* if it fails, it fails, we have our crc
+ if (i0 >= p) i0 = p - 1; */
+
+ result >>= 1;
+ if (rec_q < q + i0) {
+ /* result |= 0x00; */
+ p = i0;
+ } else {
+ result |= 0x80;
+ p -= i0;
+ q += i0;
+ }
+ }
+ *(curr++) = result;
+ }
+}
+
+void dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ unsigned long sourcelen, unsigned long dstlen)
+{
+ unsigned char bits[8];
+ int c;
+
+ for (c=0; c<8; c++)
+ bits[c] = (256 - data_in[c]);
+
+ rubin_do_decompress(bits, data_in+8, cpage_out, dstlen);
+}
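The refill logic inside the decode loop above (the "bit++ ^ 7" expression and the 32-bit reload) is the pushpull bit reader. A standalone sketch of just that part, with hypothetical names and a memcpy in place of the unaligned u32 cast; on a little-endian target it walks each byte of the window MSB-first.

#include <stdint.h>
#include <string.h>

struct bitreader {
	const unsigned char *in;	/* start of the current 32-bit chunk */
	uint32_t window;		/* the chunk itself */
	unsigned int bit;		/* next bit index within the chunk */
};

static int pull_bit(struct bitreader *br)
{
	/* "bit ^ 7" consumes bits MSB-first within each byte of the window */
	int b = (br->window >> (br->bit++ ^ 7)) & 1;

	if (br->bit > 31) {		/* window exhausted: load the next 32 bits */
		br->in += 4;
		memcpy(&br->window, br->in, sizeof(br->window));
		br->bit = 0;
	}
	return b;
}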
diff --git a/qemu/roms/u-boot/fs/jffs2/compr_zlib.c b/qemu/roms/u-boot/fs/jffs2/compr_zlib.c
new file mode 100644
index 000000000..d306b6dc4
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/compr_zlib.c
@@ -0,0 +1,48 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ *
+ * The original JFFS, from which the design for JFFS2 was derived,
+ * was designed and implemented by Axis Communications AB.
+ *
+ * The contents of this file are subject to the Red Hat eCos Public
+ * License Version 1.1 (the "Licence"); you may not use this file
+ * except in compliance with the Licence. You may obtain a copy of
+ * the Licence at http://www.redhat.com/
+ *
+ * Software distributed under the Licence is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
+ * See the Licence for the specific language governing rights and
+ * limitations under the Licence.
+ *
+ * The Original Code is JFFS2 - Journalling Flash File System, version 2
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the RHEPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the RHEPL or the GPL.
+ *
+ * $Id: compr_zlib.c,v 1.2 2002/01/24 22:58:42 rfeany Exp $
+ *
+ */
+
+#include <common.h>
+#include <config.h>
+#include <jffs2/jffs2.h>
+#include <jffs2/mini_inflate.h>
+
+long zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
+ __u32 srclen, __u32 destlen)
+{
+ return (decompress_block(cpage_out, data_in + 2, (void *) ldr_memcpy));
+
+}
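The "data_in + 2" above skips the two-byte zlib stream header (CMF and FLG, RFC 1950) so that decompress_block() from mini_inflate sees raw deflate data. A small standalone check showing what those two bytes contain; the function name is hypothetical.

static int zlib_header_ok(const unsigned char *data_in)
{
	unsigned char cmf = data_in[0];		/* compression method and flags */
	unsigned char flg = data_in[1];		/* check bits, preset dict, level */

	if ((cmf & 0x0f) != 8)			/* method 8 = deflate */
		return 0;
	if (((cmf << 8) | flg) % 31 != 0)	/* FCHECK: header checksum */
		return 0;
	return !(flg & 0x20);			/* no preset dictionary (FDICT) */
}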
diff --git a/qemu/roms/u-boot/fs/jffs2/jffs2_1pass.c b/qemu/roms/u-boot/fs/jffs2/jffs2_1pass.c
new file mode 100644
index 000000000..3fb5db383
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/jffs2_1pass.c
@@ -0,0 +1,1865 @@
+/*
+-------------------------------------------------------------------------
+ * Filename: jffs2.c
+ * Version: $Id: jffs2_1pass.c,v 1.7 2002/01/25 01:56:47 nyet Exp $
+ * Copyright: Copyright (C) 2001, Russ Dill
+ * Author: Russ Dill <Russ.Dill@asu.edu>
+ * Description: Module to load kernel from jffs2
+ *-----------------------------------------------------------------------*/
+/*
+ * some portions of this code are taken from jffs2, and as such, the
+ * following copyright notice is included.
+ *
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ *
+ * The original JFFS, from which the design for JFFS2 was derived,
+ * was designed and implemented by Axis Communications AB.
+ *
+ * The contents of this file are subject to the Red Hat eCos Public
+ * License Version 1.1 (the "Licence"); you may not use this file
+ * except in compliance with the Licence. You may obtain a copy of
+ * the Licence at http://www.redhat.com/
+ *
+ * Software distributed under the Licence is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
+ * See the Licence for the specific language governing rights and
+ * limitations under the Licence.
+ *
+ * The Original Code is JFFS2 - Journalling Flash File System, version 2
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the RHEPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the RHEPL or the GPL.
+ *
+ * $Id: jffs2_1pass.c,v 1.7 2002/01/25 01:56:47 nyet Exp $
+ *
+ */
+
+/* Ok, so anyone who knows the jffs2 code will probably want to get a paper
+ * bag to throw up into before reading this code. I looked through the jffs2
+ * code, the caching scheme is very elegant. I tried to keep the version
+ * for a bootloader as small and simple as possible. Instead of worrying about
+ * unnecessary data copies, node scans, etc, I just optimized for the known
+ * common case, a kernel, which looks like:
+ * (1) most pages are 4096 bytes
+ * (2) version numbers are somewhat sorted in ascending order
+ * (3) multiple compressed blocks making up one page is uncommon
+ *
+ * So I create a linked list of descending version numbers (insertions at the
+ * head), and then for each page, walk down the list, until a matching page
+ * with 4096 bytes is found, and then decompress the matching pages in
+ * reverse order.
+ *
+ */
+
+/*
+ * Adapted by Nye Liu <nyet@zumanetworks.com> and
+ * Rex Feany <rfeany@zumanetworks.com>
+ * on Jan/2002 for U-Boot.
+ *
+ * Clipped out all the non-1pass functions, cleaned up warnings,
+ * wrappers, etc. No major changes to the code.
+ * Please, he really means it when he says to have a paper bag
+ * handy. We needed it ;).
+ *
+ */
+
+/*
+ * Bugfixing by Kai-Uwe Bloem <kai-uwe.bloem@auerswald.de>, (C) Mar/2003
+ *
+ * - overhaul of the memory management. Removed much of the "paper-bagging"
+ * in that part of the code, fixed several bugs, now frees memory when
+ * partition is changed.
+ * It's still ugly :-(
+ * - fixed a bug in jffs2_1pass_read_inode where the file length calculation
+ * was incorrect. Removed a bit of the paper-bagging as well.
+ * - removed double crc calculation for fragment headers in jffs2_private.h
+ * for speedup.
+ * - scan_empty rewritten in a more "standard" manner (non-paperbag, that is).
+ * - spinning wheel now spins depending on how much memory has been scanned
+ * - lots of small changes all over the place to "improve" readability.
+ * - implemented fragment sorting to ensure that the newest data is copied
+ * if there are multiple copies of fragments for a certain file offset.
+ *
+ * The fragment sorting feature must be enabled by CONFIG_SYS_JFFS2_SORT_FRAGMENTS.
+ * Sorting is done while adding fragments to the lists, which is more or less a
+ * bubble sort. This takes a lot of time, and is most probably not an issue if
+ * the boot filesystem is always mounted readonly.
+ *
+ * You should define it if the boot filesystem is mounted writable, and updates
+ * to the boot files are done by copying files to that filesystem.
+ *
+ *
+ * There's a big issue left: endianness is completely ignored in this code. Duh!
+ *
+ *
+ * You still should have paper bags at hand :-(. The code lacks more or less
+ * any comment, and is still arcane and difficult to read in places. As this
+ * might be incompatible with any new code from the jffs2 maintainers anyway,
+ * it should probably be dumped and replaced by something like jffs2reader!
+ */
+
+
+#include <common.h>
+#include <config.h>
+#include <malloc.h>
+#include <div64.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <watchdog.h>
+#include <jffs2/jffs2.h>
+#include <jffs2/jffs2_1pass.h>
+#include <linux/compat.h>
+#include <asm/errno.h>
+
+#include "jffs2_private.h"
+
+
+#define NODE_CHUNK 1024 /* size of memory allocation chunk in b_nodes */
+#define SPIN_BLKSIZE 18 /* spin after having scanned 1<<BLKSIZE bytes */
+
+/* Debugging switches */
+#undef DEBUG_DIRENTS /* print directory entry list after scan */
+#undef DEBUG_FRAGMENTS /* print fragment list after scan */
+#undef DEBUG /* enable debugging messages */
+
+
+#ifdef DEBUG
+# define DEBUGF(fmt,args...) printf(fmt ,##args)
+#else
+# define DEBUGF(fmt,args...)
+#endif
+
+#include "summary.h"
+
+/* keeps a pointer to the currently processed partition */
+static struct part_info *current_part;
+
+#if (defined(CONFIG_JFFS2_NAND) && \
+ defined(CONFIG_CMD_NAND) )
+#include <nand.h>
+/*
+ * Support for jffs2 on top of NAND-flash
+ *
+ * NAND memory isn't mapped in processor's address space,
+ * so data should be fetched from flash before
+ * being processed. This is exactly what functions declared
+ * here do.
+ *
+ */
+
+#define NAND_PAGE_SIZE 512
+#define NAND_PAGE_SHIFT 9
+#define NAND_PAGE_MASK (~(NAND_PAGE_SIZE-1))
+
+#ifndef NAND_CACHE_PAGES
+#define NAND_CACHE_PAGES 16
+#endif
+#define NAND_CACHE_SIZE (NAND_CACHE_PAGES*NAND_PAGE_SIZE)
+
+static u8* nand_cache = NULL;
+static u32 nand_cache_off = (u32)-1;
+
+static int read_nand_cached(u32 off, u32 size, u_char *buf)
+{
+ struct mtdids *id = current_part->dev->id;
+ u32 bytes_read = 0;
+ size_t retlen;
+ int cpy_bytes;
+
+ while (bytes_read < size) {
+ if ((off + bytes_read < nand_cache_off) ||
+ (off + bytes_read >= nand_cache_off+NAND_CACHE_SIZE)) {
+ nand_cache_off = (off + bytes_read) & NAND_PAGE_MASK;
+ if (!nand_cache) {
+ /* This memory never gets freed but 'cause
+ it's a bootloader, nobody cares */
+ nand_cache = malloc(NAND_CACHE_SIZE);
+ if (!nand_cache) {
+ printf("read_nand_cached: can't alloc cache size %d bytes\n",
+ NAND_CACHE_SIZE);
+ return -1;
+ }
+ }
+
+ retlen = NAND_CACHE_SIZE;
+ if (nand_read(&nand_info[id->num], nand_cache_off,
+ &retlen, nand_cache) != 0 ||
+ retlen != NAND_CACHE_SIZE) {
+ printf("read_nand_cached: error reading nand off %#x size %d bytes\n",
+ nand_cache_off, NAND_CACHE_SIZE);
+ return -1;
+ }
+ }
+ cpy_bytes = nand_cache_off + NAND_CACHE_SIZE - (off + bytes_read);
+ if (cpy_bytes > size - bytes_read)
+ cpy_bytes = size - bytes_read;
+ memcpy(buf + bytes_read,
+ nand_cache + off + bytes_read - nand_cache_off,
+ cpy_bytes);
+ bytes_read += cpy_bytes;
+ }
+ return bytes_read;
+}
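A sketch of the cache-window arithmetic in the loop above, pulled out as a hypothetical helper: given a request position off inside a cache window of cache_size bytes based at cache_off, it returns how many bytes one memcpy can serve before the window must be refilled.

static u32 bytes_from_cache(u32 off, u32 want, u32 cache_off, u32 cache_size)
{
	u32 avail = cache_off + cache_size - off;	/* bytes left in the window */

	return (avail < want) ? avail : want;
}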
+
+static void *get_fl_mem_nand(u32 off, u32 size, void *ext_buf)
+{
+ u_char *buf = ext_buf ? (u_char*)ext_buf : (u_char*)malloc(size);
+
+ if (NULL == buf) {
+ printf("get_fl_mem_nand: can't alloc %d bytes\n", size);
+ return NULL;
+ }
+ if (read_nand_cached(off, size, buf) < 0) {
+ if (!ext_buf)
+ free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static void *get_node_mem_nand(u32 off, void *ext_buf)
+{
+ struct jffs2_unknown_node node;
+ void *ret = NULL;
+
+ if (NULL == get_fl_mem_nand(off, sizeof(node), &node))
+ return NULL;
+
+ if (!(ret = get_fl_mem_nand(off, node.magic ==
+ JFFS2_MAGIC_BITMASK ? node.totlen : sizeof(node),
+ ext_buf))) {
+ printf("off = %#x magic %#x type %#x node.totlen = %d\n",
+ off, node.magic, node.nodetype, node.totlen);
+ }
+ return ret;
+}
+
+static void put_fl_mem_nand(void *buf)
+{
+ free(buf);
+}
+#endif
+
+#if defined(CONFIG_CMD_ONENAND)
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <onenand_uboot.h>
+
+#define ONENAND_PAGE_SIZE 2048
+#define ONENAND_PAGE_SHIFT 11
+#define ONENAND_PAGE_MASK (~(ONENAND_PAGE_SIZE-1))
+
+#ifndef ONENAND_CACHE_PAGES
+#define ONENAND_CACHE_PAGES 4
+#endif
+#define ONENAND_CACHE_SIZE (ONENAND_CACHE_PAGES*ONENAND_PAGE_SIZE)
+
+static u8* onenand_cache;
+static u32 onenand_cache_off = (u32)-1;
+
+static int read_onenand_cached(u32 off, u32 size, u_char *buf)
+{
+ u32 bytes_read = 0;
+ size_t retlen;
+ int cpy_bytes;
+
+ while (bytes_read < size) {
+ if ((off + bytes_read < onenand_cache_off) ||
+ (off + bytes_read >= onenand_cache_off + ONENAND_CACHE_SIZE)) {
+ onenand_cache_off = (off + bytes_read) & ONENAND_PAGE_MASK;
+ if (!onenand_cache) {
+ /* This memory never gets freed but 'cause
+ it's a bootloader, nobody cares */
+ onenand_cache = malloc(ONENAND_CACHE_SIZE);
+ if (!onenand_cache) {
+ printf("read_onenand_cached: can't alloc cache size %d bytes\n",
+ ONENAND_CACHE_SIZE);
+ return -1;
+ }
+ }
+
+ retlen = ONENAND_CACHE_SIZE;
+ if (onenand_read(&onenand_mtd, onenand_cache_off, retlen,
+ &retlen, onenand_cache) != 0 ||
+ retlen != ONENAND_CACHE_SIZE) {
+ printf("read_onenand_cached: error reading nand off %#x size %d bytes\n",
+ onenand_cache_off, ONENAND_CACHE_SIZE);
+ return -1;
+ }
+ }
+ cpy_bytes = onenand_cache_off + ONENAND_CACHE_SIZE - (off + bytes_read);
+ if (cpy_bytes > size - bytes_read)
+ cpy_bytes = size - bytes_read;
+ memcpy(buf + bytes_read,
+ onenand_cache + off + bytes_read - onenand_cache_off,
+ cpy_bytes);
+ bytes_read += cpy_bytes;
+ }
+ return bytes_read;
+}
+
+static void *get_fl_mem_onenand(u32 off, u32 size, void *ext_buf)
+{
+ u_char *buf = ext_buf ? (u_char *)ext_buf : (u_char *)malloc(size);
+
+ if (NULL == buf) {
+ printf("get_fl_mem_onenand: can't alloc %d bytes\n", size);
+ return NULL;
+ }
+ if (read_onenand_cached(off, size, buf) < 0) {
+ if (!ext_buf)
+ free(buf);
+ return NULL;
+ }
+
+ return buf;
+}
+
+static void *get_node_mem_onenand(u32 off, void *ext_buf)
+{
+ struct jffs2_unknown_node node;
+ void *ret = NULL;
+
+ if (NULL == get_fl_mem_onenand(off, sizeof(node), &node))
+ return NULL;
+
+ ret = get_fl_mem_onenand(off, node.magic ==
+ JFFS2_MAGIC_BITMASK ? node.totlen : sizeof(node),
+ ext_buf);
+ if (!ret) {
+ printf("off = %#x magic %#x type %#x node.totlen = %d\n",
+ off, node.magic, node.nodetype, node.totlen);
+ }
+ return ret;
+}
+
+
+static void put_fl_mem_onenand(void *buf)
+{
+ free(buf);
+}
+#endif
+
+
+#if defined(CONFIG_CMD_FLASH)
+/*
+ * Support for jffs2 on top of NOR-flash
+ *
+ * NOR flash memory is mapped in processor's address space,
+ * just return address.
+ */
+static inline void *get_fl_mem_nor(u32 off, u32 size, void *ext_buf)
+{
+ u32 addr = off;
+ struct mtdids *id = current_part->dev->id;
+
+ extern flash_info_t flash_info[];
+ flash_info_t *flash = &flash_info[id->num];
+
+ addr += flash->start[0];
+ if (ext_buf) {
+ memcpy(ext_buf, (void *)addr, size);
+ return ext_buf;
+ }
+ return (void*)addr;
+}
+
+static inline void *get_node_mem_nor(u32 off, void *ext_buf)
+{
+ struct jffs2_unknown_node *pNode;
+
+ /* pNode will point directly to flash - don't provide external buffer
+ and don't care about size */
+ pNode = get_fl_mem_nor(off, 0, NULL);
+ return (void *)get_fl_mem_nor(off, pNode->magic == JFFS2_MAGIC_BITMASK ?
+ pNode->totlen : sizeof(*pNode), ext_buf);
+}
+#endif
+
+
+/*
+ * Generic jffs2 raw memory and node read routines.
+ *
+ */
+static inline void *get_fl_mem(u32 off, u32 size, void *ext_buf)
+{
+ struct mtdids *id = current_part->dev->id;
+
+ switch(id->type) {
+#if defined(CONFIG_CMD_FLASH)
+ case MTD_DEV_TYPE_NOR:
+ return get_fl_mem_nor(off, size, ext_buf);
+ break;
+#endif
+#if defined(CONFIG_JFFS2_NAND) && defined(CONFIG_CMD_NAND)
+ case MTD_DEV_TYPE_NAND:
+ return get_fl_mem_nand(off, size, ext_buf);
+ break;
+#endif
+#if defined(CONFIG_CMD_ONENAND)
+ case MTD_DEV_TYPE_ONENAND:
+ return get_fl_mem_onenand(off, size, ext_buf);
+ break;
+#endif
+ default:
+ printf("get_fl_mem: unknown device type, " \
+ "using raw offset!\n");
+ }
+ return (void*)off;
+}
+
+static inline void *get_node_mem(u32 off, void *ext_buf)
+{
+ struct mtdids *id = current_part->dev->id;
+
+ switch(id->type) {
+#if defined(CONFIG_CMD_FLASH)
+ case MTD_DEV_TYPE_NOR:
+ return get_node_mem_nor(off, ext_buf);
+ break;
+#endif
+#if defined(CONFIG_JFFS2_NAND) && \
+ defined(CONFIG_CMD_NAND)
+ case MTD_DEV_TYPE_NAND:
+ return get_node_mem_nand(off, ext_buf);
+ break;
+#endif
+#if defined(CONFIG_CMD_ONENAND)
+ case MTD_DEV_TYPE_ONENAND:
+ return get_node_mem_onenand(off, ext_buf);
+ break;
+#endif
+ default:
+ printf("get_fl_mem: unknown device type, " \
+ "using raw offset!\n");
+ }
+ return (void*)off;
+}
+
+static inline void put_fl_mem(void *buf, void *ext_buf)
+{
+ struct mtdids *id = current_part->dev->id;
+
+ /* If buf is the same as ext_buf, it was provided by the caller -
+ we shouldn't free it then. */
+ if (buf == ext_buf)
+ return;
+ switch (id->type) {
+#if defined(CONFIG_JFFS2_NAND) && defined(CONFIG_CMD_NAND)
+ case MTD_DEV_TYPE_NAND:
+ return put_fl_mem_nand(buf);
+#endif
+#if defined(CONFIG_CMD_ONENAND)
+ case MTD_DEV_TYPE_ONENAND:
+ return put_fl_mem_onenand(buf);
+#endif
+ }
+}
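A hypothetical caller showing the pairing these helpers expect: get_node_mem() may return a pointer into the supplied buffer, a freshly malloc'ed buffer (NAND/OneNAND) or raw flash (NOR), and put_fl_mem() frees the result only when it is not the caller's own buffer.

static void example_walk_dirent(struct b_lists *pL, u32 offset)
{
	struct jffs2_raw_dirent *jDir;

	jDir = (struct jffs2_raw_dirent *)get_node_mem(offset, pL->readbuf);
	if (!jDir)
		return;
	/* ... inspect jDir->pino, jDir->name, jDir->version here ... */
	put_fl_mem(jDir, pL->readbuf);	/* no-op when jDir == pL->readbuf */
}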
+
+/* Compression names */
+static char *compr_names[] = {
+ "NONE",
+ "ZERO",
+ "RTIME",
+ "RUBINMIPS",
+ "COPY",
+ "DYNRUBIN",
+ "ZLIB",
+#if defined(CONFIG_JFFS2_LZO)
+ "LZO",
+#endif
+};
+
+/* Memory management */
+struct mem_block {
+ u32 index;
+ struct mem_block *next;
+ struct b_node nodes[NODE_CHUNK];
+};
+
+
+static void
+free_nodes(struct b_list *list)
+{
+ while (list->listMemBase != NULL) {
+ struct mem_block *next = list->listMemBase->next;
+ free( list->listMemBase );
+ list->listMemBase = next;
+ }
+}
+
+static struct b_node *
+add_node(struct b_list *list)
+{
+ u32 index = 0;
+ struct mem_block *memBase;
+ struct b_node *b;
+
+ memBase = list->listMemBase;
+ if (memBase != NULL)
+ index = memBase->index;
+#if 0
+ putLabeledWord("add_node: index = ", index);
+ putLabeledWord("add_node: memBase = ", list->listMemBase);
+#endif
+
+ if (memBase == NULL || index >= NODE_CHUNK) {
+ /* we need more space before we continue */
+ memBase = mmalloc(sizeof(struct mem_block));
+ if (memBase == NULL) {
+ putstr("add_node: malloc failed\n");
+ return NULL;
+ }
+ memBase->next = list->listMemBase;
+ index = 0;
+#if 0
+ putLabeledWord("add_node: alloced a new membase at ", *memBase);
+#endif
+
+ }
+ /* now we have room to add it. */
+ b = &memBase->nodes[index];
+ index ++;
+
+ memBase->index = index;
+ list->listMemBase = memBase;
+ list->listCount++;
+ return b;
+}
+
+static struct b_node *
+insert_node(struct b_list *list, u32 offset)
+{
+ struct b_node *new;
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ struct b_node *b, *prev;
+#endif
+
+ if (!(new = add_node(list))) {
+ putstr("add_node failed!\r\n");
+ return NULL;
+ }
+ new->offset = offset;
+
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ if (list->listTail != NULL && list->listCompare(new, list->listTail))
+ prev = list->listTail;
+ else if (list->listLast != NULL && list->listCompare(new, list->listLast))
+ prev = list->listLast;
+ else
+ prev = NULL;
+
+ for (b = (prev ? prev->next : list->listHead);
+ b != NULL && list->listCompare(new, b);
+ prev = b, b = b->next) {
+ list->listLoops++;
+ }
+ if (b != NULL)
+ list->listLast = prev;
+
+ if (b != NULL) {
+ new->next = b;
+ if (prev != NULL)
+ prev->next = new;
+ else
+ list->listHead = new;
+ } else
+#endif
+ {
+ new->next = (struct b_node *) NULL;
+ if (list->listTail != NULL) {
+ list->listTail->next = new;
+ list->listTail = new;
+ } else {
+ list->listTail = list->listHead = new;
+ }
+ }
+
+ return new;
+}
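Usage sketch: during the flash scan each raw node is recorded only by its flash offset. The hypothetical wrapper below mirrors how jffs2_1pass_build_lists() further down calls insert_node() for data fragments.

static int record_fragment(struct b_lists *pL, u32 part_offset, u32 node_ofs)
{
	/* NULL means add_node() could not allocate another NODE_CHUNK block */
	return insert_node(&pL->frag, part_offset + node_ofs) ? 0 : -1;
}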
+
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+/* Sort data entries with the latest version last, so that if there
+ * is overlapping data the latest version will be used.
+ */
+static int compare_inodes(struct b_node *new, struct b_node *old)
+{
+ struct jffs2_raw_inode ojNew;
+ struct jffs2_raw_inode ojOld;
+ struct jffs2_raw_inode *jNew =
+ (struct jffs2_raw_inode *)get_fl_mem(new->offset, sizeof(ojNew), &ojNew);
+ struct jffs2_raw_inode *jOld =
+ (struct jffs2_raw_inode *)get_fl_mem(old->offset, sizeof(ojOld), &ojOld);
+
+ return jNew->version > jOld->version;
+}
+
+/* Sort directory entries so all entries in the same directory
+ * with the same name are grouped together, with the latest version
+ * last. This makes it easy to eliminate all but the latest version
+ * by marking the previous version dead by setting the inode to 0.
+ */
+static int compare_dirents(struct b_node *new, struct b_node *old)
+{
+ struct jffs2_raw_dirent ojNew;
+ struct jffs2_raw_dirent ojOld;
+ struct jffs2_raw_dirent *jNew =
+ (struct jffs2_raw_dirent *)get_fl_mem(new->offset, sizeof(ojNew), &ojNew);
+ struct jffs2_raw_dirent *jOld =
+ (struct jffs2_raw_dirent *)get_fl_mem(old->offset, sizeof(ojOld), &ojOld);
+ int cmp;
+
+ /* ascending sort by pino */
+ if (jNew->pino != jOld->pino)
+ return jNew->pino > jOld->pino;
+
+ /* pino is the same, so use ascending sort by nsize, so
+ * we don't do strncmp unless we really must.
+ */
+ if (jNew->nsize != jOld->nsize)
+ return jNew->nsize > jOld->nsize;
+
+ /* length is also the same, so use ascending sort by name
+ */
+ cmp = strncmp((char *)jNew->name, (char *)jOld->name, jNew->nsize);
+ if (cmp != 0)
+ return cmp > 0;
+
+ /* we have duplicate names in this directory, so use ascending
+ * sort by version
+ */
+ if (jNew->version > jOld->version) {
+ /* since jNew is newer, we know jOld is not valid, so
+ * mark it with inode 0 and it will not be used
+ */
+ jOld->ino = 0;
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+void
+jffs2_free_cache(struct part_info *part)
+{
+ struct b_lists *pL;
+
+ if (part->jffs2_priv != NULL) {
+ pL = (struct b_lists *)part->jffs2_priv;
+ free_nodes(&pL->frag);
+ free_nodes(&pL->dir);
+ free(pL->readbuf);
+ free(pL);
+ }
+}
+
+static u32
+jffs_init_1pass_list(struct part_info *part)
+{
+ struct b_lists *pL;
+
+ jffs2_free_cache(part);
+
+ if (NULL != (part->jffs2_priv = malloc(sizeof(struct b_lists)))) {
+ pL = (struct b_lists *)part->jffs2_priv;
+
+ memset(pL, 0, sizeof(*pL));
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ pL->dir.listCompare = compare_dirents;
+ pL->frag.listCompare = compare_inodes;
+#endif
+ }
+ return 0;
+}
+
+/* read the data of the given inode, decompressing each fragment into dest; returns the file length */
+static long
+jffs2_1pass_read_inode(struct b_lists *pL, u32 inode, char *dest)
+{
+ struct b_node *b;
+ struct jffs2_raw_inode *jNode;
+ u32 totalSize = 0;
+ u32 latestVersion = 0;
+ uchar *lDest;
+ uchar *src;
+ int i;
+ u32 counter = 0;
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ /* Find file size before loading any data, so fragments that
+ * start past the end of file can be ignored. A fragment
+ * that is partially in the file is loaded, so extra data may
+ * be loaded up to the next 4K boundary above the file size.
+ * This shouldn't cause trouble when loading kernel images, so
+ * we will live with it.
+ */
+ for (b = pL->frag.listHead; b != NULL; b = b->next) {
+ jNode = (struct jffs2_raw_inode *) get_fl_mem(b->offset,
+ sizeof(struct jffs2_raw_inode), pL->readbuf);
+ if ((inode == jNode->ino)) {
+ /* get actual file length from the newest node */
+ if (jNode->version >= latestVersion) {
+ totalSize = jNode->isize;
+ latestVersion = jNode->version;
+ }
+ }
+ put_fl_mem(jNode, pL->readbuf);
+ }
+#endif
+
+ for (b = pL->frag.listHead; b != NULL; b = b->next) {
+ jNode = (struct jffs2_raw_inode *) get_node_mem(b->offset,
+ pL->readbuf);
+ if ((inode == jNode->ino)) {
+#if 0
+ putLabeledWord("\r\n\r\nread_inode: totlen = ", jNode->totlen);
+ putLabeledWord("read_inode: inode = ", jNode->ino);
+ putLabeledWord("read_inode: version = ", jNode->version);
+ putLabeledWord("read_inode: isize = ", jNode->isize);
+ putLabeledWord("read_inode: offset = ", jNode->offset);
+ putLabeledWord("read_inode: csize = ", jNode->csize);
+ putLabeledWord("read_inode: dsize = ", jNode->dsize);
+ putLabeledWord("read_inode: compr = ", jNode->compr);
+ putLabeledWord("read_inode: usercompr = ", jNode->usercompr);
+ putLabeledWord("read_inode: flags = ", jNode->flags);
+#endif
+
+#ifndef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ /* get actual file length from the newest node */
+ if (jNode->version >= latestVersion) {
+ totalSize = jNode->isize;
+ latestVersion = jNode->version;
+ }
+#endif
+
+ if(dest) {
+ src = ((uchar *) jNode) + sizeof(struct jffs2_raw_inode);
+ /* ignore data behind latest known EOF */
+ if (jNode->offset > totalSize) {
+ put_fl_mem(jNode, pL->readbuf);
+ continue;
+ }
+ if (b->datacrc == CRC_UNKNOWN)
+ b->datacrc = data_crc(jNode) ?
+ CRC_OK : CRC_BAD;
+ if (b->datacrc == CRC_BAD) {
+ put_fl_mem(jNode, pL->readbuf);
+ continue;
+ }
+
+ lDest = (uchar *) (dest + jNode->offset);
+#if 0
+ putLabeledWord("read_inode: src = ", src);
+ putLabeledWord("read_inode: dest = ", lDest);
+#endif
+ switch (jNode->compr) {
+ case JFFS2_COMPR_NONE:
+ ldr_memcpy(lDest, src, jNode->dsize);
+ break;
+ case JFFS2_COMPR_ZERO:
+ for (i = 0; i < jNode->dsize; i++)
+ *(lDest++) = 0;
+ break;
+ case JFFS2_COMPR_RTIME:
+ rtime_decompress(src, lDest, jNode->csize, jNode->dsize);
+ break;
+ case JFFS2_COMPR_DYNRUBIN:
+ /* this is slow but it works */
+ dynrubin_decompress(src, lDest, jNode->csize, jNode->dsize);
+ break;
+ case JFFS2_COMPR_ZLIB:
+ zlib_decompress(src, lDest, jNode->csize, jNode->dsize);
+ break;
+#if defined(CONFIG_JFFS2_LZO)
+ case JFFS2_COMPR_LZO:
+ lzo_decompress(src, lDest, jNode->csize, jNode->dsize);
+ break;
+#endif
+ default:
+ /* unknown */
+ putLabeledWord("UNKNOWN COMPRESSION METHOD = ", jNode->compr);
+ put_fl_mem(jNode, pL->readbuf);
+ return -1;
+ break;
+ }
+ }
+
+#if 0
+ putLabeledWord("read_inode: totalSize = ", totalSize);
+#endif
+ }
+ counter++;
+ put_fl_mem(jNode, pL->readbuf);
+ }
+
+#if 0
+ putLabeledWord("read_inode: returning = ", totalSize);
+#endif
+ return totalSize;
+}
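The switch above is the only place the per-node compression type is interpreted. Below is the same dispatch re-expressed as a standalone helper (a sketch, not part of the U-Boot source), using the decompressors added by this patch; memset stands in for the explicit zero-fill loop.

static int decompress_fragment(struct jffs2_raw_inode *jNode,
			       unsigned char *src, unsigned char *dst)
{
	switch (jNode->compr) {
	case JFFS2_COMPR_NONE:
		ldr_memcpy(dst, src, jNode->dsize);
		return 0;
	case JFFS2_COMPR_ZERO:
		memset(dst, 0, jNode->dsize);
		return 0;
	case JFFS2_COMPR_RTIME:
		rtime_decompress(src, dst, jNode->csize, jNode->dsize);
		return 0;
	case JFFS2_COMPR_DYNRUBIN:
		dynrubin_decompress(src, dst, jNode->csize, jNode->dsize);
		return 0;
	case JFFS2_COMPR_ZLIB:
		zlib_decompress(src, dst, jNode->csize, jNode->dsize);
		return 0;
#if defined(CONFIG_JFFS2_LZO)
	case JFFS2_COMPR_LZO:
		lzo_decompress(src, dst, jNode->csize, jNode->dsize);
		return 0;
#endif
	default:
		return -1;	/* unknown or unsupported compression type */
	}
}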
+
+/* find the inode from the slashless name given a parent */
+static u32
+jffs2_1pass_find_inode(struct b_lists * pL, const char *name, u32 pino)
+{
+ struct b_node *b;
+ struct jffs2_raw_dirent *jDir;
+ int len;
+ u32 counter;
+ u32 version = 0;
+ u32 inode = 0;
+
+ /* name is assumed slash free */
+ len = strlen(name);
+
+ counter = 0;
+ /* we need to search all and return the inode with the highest version */
+ for(b = pL->dir.listHead; b; b = b->next, counter++) {
+ jDir = (struct jffs2_raw_dirent *) get_node_mem(b->offset,
+ pL->readbuf);
+ if ((pino == jDir->pino) && (len == jDir->nsize) &&
+ (jDir->ino) && /* 0 for unlink */
+ (!strncmp((char *)jDir->name, name, len))) { /* a match */
+ if (jDir->version < version) {
+ put_fl_mem(jDir, pL->readbuf);
+ continue;
+ }
+
+ if (jDir->version == version && inode != 0) {
+ /* I'm pretty sure this isn't legal */
+ putstr(" ** ERROR ** ");
+ putnstr(jDir->name, jDir->nsize);
+ putLabeledWord(" has dup version =", version);
+ }
+ inode = jDir->ino;
+ version = jDir->version;
+ }
+#if 0
+ putstr("\r\nfind_inode:p&l ->");
+ putnstr(jDir->name, jDir->nsize);
+ putstr("\r\n");
+ putLabeledWord("pino = ", jDir->pino);
+ putLabeledWord("nsize = ", jDir->nsize);
+ putLabeledWord("b = ", (u32) b);
+ putLabeledWord("counter = ", counter);
+#endif
+ put_fl_mem(jDir, pL->readbuf);
+ }
+ return inode;
+}
+
+char *mkmodestr(unsigned long mode, char *str)
+{
+ static const char *l = "xwr";
+ int mask = 1, i;
+ char c;
+
+ switch (mode & S_IFMT) {
+ case S_IFDIR: str[0] = 'd'; break;
+ case S_IFBLK: str[0] = 'b'; break;
+ case S_IFCHR: str[0] = 'c'; break;
+ case S_IFIFO: str[0] = 'f'; break;
+ case S_IFLNK: str[0] = 'l'; break;
+ case S_IFSOCK: str[0] = 's'; break;
+ case S_IFREG: str[0] = '-'; break;
+ default: str[0] = '?';
+ }
+
+ for(i = 0; i < 9; i++) {
+ c = l[i%3];
+ str[9-i] = (mode & mask)?c:'-';
+ mask = mask<<1;
+ }
+
+ if(mode & S_ISUID) str[3] = (mode & S_IXUSR)?'s':'S';
+ if(mode & S_ISGID) str[6] = (mode & S_IXGRP)?'s':'S';
+ if(mode & S_ISVTX) str[9] = (mode & S_IXOTH)?'t':'T';
+ str[10] = '\0';
+ return str;
+}
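mkmodestr() fills the caller's buffer with an ls-style mode string and returns it; the buffer must hold at least 11 bytes (ten characters plus the NUL written to str[10]). A hypothetical usage:

void example_show_mode(unsigned long mode)
{
	char str[11];

	/* e.g. mode 040755 (S_IFDIR | 0755) prints "drwxr-xr-x" */
	printf("%s\n", mkmodestr(mode, str));
}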
+
+static inline void dump_stat(struct stat *st, const char *name)
+{
+ char str[20];
+ char s[64], *p;
+
+ if (st->st_mtime == (time_t)(-1)) /* some ctimes really hate -1 */
+ st->st_mtime = 1;
+
+ ctime_r((time_t *)&st->st_mtime, s/*,64*/); /* newlib ctime doesn't have buflen */
+
+ if ((p = strchr(s,'\n')) != NULL) *p = '\0';
+ if ((p = strchr(s,'\r')) != NULL) *p = '\0';
+
+/*
+ printf("%6lo %s %8ld %s %s\n", st->st_mode, mkmodestr(st->st_mode, str),
+ st->st_size, s, name);
+*/
+
+ printf(" %s %8ld %s %s", mkmodestr(st->st_mode,str), st->st_size, s, name);
+}
+
+static inline u32 dump_inode(struct b_lists * pL, struct jffs2_raw_dirent *d, struct jffs2_raw_inode *i)
+{
+ char fname[256];
+ struct stat st;
+
+ if(!d || !i) return -1;
+
+ strncpy(fname, (char *)d->name, d->nsize);
+ fname[d->nsize] = '\0';
+
+ memset(&st,0,sizeof(st));
+
+ st.st_mtime = i->mtime;
+ st.st_mode = i->mode;
+ st.st_ino = i->ino;
+ st.st_size = i->isize;
+
+ dump_stat(&st, fname);
+
+ if (d->type == DT_LNK) {
+ unsigned char *src = (unsigned char *) (&i[1]);
+ putstr(" -> ");
+ putnstr(src, (int)i->dsize);
+ }
+
+ putstr("\r\n");
+
+ return 0;
+}
+
+/* list inodes with the given pino */
+static u32
+jffs2_1pass_list_inodes(struct b_lists * pL, u32 pino)
+{
+ struct b_node *b;
+ struct jffs2_raw_dirent *jDir;
+
+ for (b = pL->dir.listHead; b; b = b->next) {
+ jDir = (struct jffs2_raw_dirent *) get_node_mem(b->offset,
+ pL->readbuf);
+ if ((pino == jDir->pino) && (jDir->ino)) { /* ino=0 -> unlink */
+ u32 i_version = 0;
+ struct jffs2_raw_inode ojNode;
+ struct jffs2_raw_inode *jNode, *i = NULL;
+ struct b_node *b2 = pL->frag.listHead;
+
+ while (b2) {
+ jNode = (struct jffs2_raw_inode *)
+ get_fl_mem(b2->offset, sizeof(ojNode), &ojNode);
+ if (jNode->ino == jDir->ino && jNode->version >= i_version) {
+ i_version = jNode->version;
+ if (i)
+ put_fl_mem(i, NULL);
+
+ if (jDir->type == DT_LNK)
+ i = get_node_mem(b2->offset,
+ NULL);
+ else
+ i = get_fl_mem(b2->offset,
+ sizeof(*i),
+ NULL);
+ }
+ b2 = b2->next;
+ }
+
+ dump_inode(pL, jDir, i);
+ put_fl_mem(i, NULL);
+ }
+ put_fl_mem(jDir, pL->readbuf);
+ }
+ return pino;
+}
+
+static u32
+jffs2_1pass_search_inode(struct b_lists * pL, const char *fname, u32 pino)
+{
+ int i;
+ char tmp[256];
+ char working_tmp[256];
+ char *c;
+
+ /* discard any leading slash */
+ i = 0;
+ while (fname[i] == '/')
+ i++;
+ strcpy(tmp, &fname[i]);
+
+ while ((c = (char *) strchr(tmp, '/'))) /* we are still walking directory components */
+ {
+ strncpy(working_tmp, tmp, c - tmp);
+ working_tmp[c - tmp] = '\0';
+#if 0
+ putstr("search_inode: tmp = ");
+ putstr(tmp);
+ putstr("\r\n");
+ putstr("search_inode: wtmp = ");
+ putstr(working_tmp);
+ putstr("\r\n");
+ putstr("search_inode: c = ");
+ putstr(c);
+ putstr("\r\n");
+#endif
+ for (i = 0; i < strlen(c) - 1; i++)
+ tmp[i] = c[i + 1];
+ tmp[i] = '\0';
+#if 0
+ putstr("search_inode: post tmp = ");
+ putstr(tmp);
+ putstr("\r\n");
+#endif
+
+ if (!(pino = jffs2_1pass_find_inode(pL, working_tmp, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(working_tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ }
+ /* this is for the bare filename, directories have already been mapped */
+ if (!(pino = jffs2_1pass_find_inode(pL, tmp, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ return pino;
+
+}
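A compact sketch of the component walk performed above (and by the _list_inodes variant below): strip leading slashes, peel one name at a time and resolve it against the current parent, starting from inode 1, the filesystem root. The helper name is hypothetical; it exists only to make the loop easier to follow.

static u32 resolve_path_sketch(struct b_lists *pL, const char *path)
{
	char comp[256];
	u32 pino = 1;				/* inode 1 is the root directory */

	while (*path == '/')
		path++;
	while (*path && pino) {
		const char *slash = strchr(path, '/');
		size_t len = slash ? (size_t)(slash - path) : strlen(path);

		if (len >= sizeof(comp))
			return 0;		/* component name too long */
		memcpy(comp, path, len);
		comp[len] = '\0';
		pino = jffs2_1pass_find_inode(pL, comp, pino);
		path += len;
		while (*path == '/')
			path++;
	}
	return pino;				/* 0 if any lookup failed */
}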
+
+static u32
+jffs2_1pass_resolve_inode(struct b_lists * pL, u32 ino)
+{
+ struct b_node *b;
+ struct b_node *b2;
+ struct jffs2_raw_dirent *jDir;
+ struct jffs2_raw_inode *jNode;
+ u8 jDirFoundType = 0;
+ u32 jDirFoundIno = 0;
+ u32 jDirFoundPino = 0;
+ char tmp[256];
+ u32 version = 0;
+ u32 pino;
+ unsigned char *src;
+
+ /* we need to search all and return the inode with the highest version */
+ for(b = pL->dir.listHead; b; b = b->next) {
+ jDir = (struct jffs2_raw_dirent *) get_node_mem(b->offset,
+ pL->readbuf);
+ if (ino == jDir->ino) {
+ if (jDir->version < version) {
+ put_fl_mem(jDir, pL->readbuf);
+ continue;
+ }
+
+ if (jDir->version == version && jDirFoundType) {
+ /* I'm pretty sure this isn't legal */
+ putstr(" ** ERROR ** ");
+ putnstr(jDir->name, jDir->nsize);
+ putLabeledWord(" has dup version (resolve) = ",
+ version);
+ }
+
+ jDirFoundType = jDir->type;
+ jDirFoundIno = jDir->ino;
+ jDirFoundPino = jDir->pino;
+ version = jDir->version;
+ }
+ put_fl_mem(jDir, pL->readbuf);
+ }
+ /* now we found the right entry again. (shoulda returned inode*) */
+ if (jDirFoundType != DT_LNK)
+ return jDirFoundIno;
+
+ /* it's a soft link so we follow it again. */
+ b2 = pL->frag.listHead;
+ while (b2) {
+ jNode = (struct jffs2_raw_inode *) get_node_mem(b2->offset,
+ pL->readbuf);
+ if (jNode->ino == jDirFoundIno) {
+ src = (unsigned char *)jNode + sizeof(struct jffs2_raw_inode);
+
+#if 0
+ putLabeledWord("\t\t dsize = ", jNode->dsize);
+ putstr("\t\t target = ");
+ putnstr(src, jNode->dsize);
+ putstr("\r\n");
+#endif
+ strncpy(tmp, (char *)src, jNode->dsize);
+ tmp[jNode->dsize] = '\0';
+ put_fl_mem(jNode, pL->readbuf);
+ break;
+ }
+ b2 = b2->next;
+ put_fl_mem(jNode, pL->readbuf);
+ }
+ /* ok so the name of the new file to find is in tmp */
+ /* if it starts with a slash it is root based else shared dirs */
+ if (tmp[0] == '/')
+ pino = 1;
+ else
+ pino = jDirFoundPino;
+
+ return jffs2_1pass_search_inode(pL, tmp, pino);
+}
+
+static u32
+jffs2_1pass_search_list_inodes(struct b_lists * pL, const char *fname, u32 pino)
+{
+ int i;
+ char tmp[256];
+ char working_tmp[256];
+ char *c;
+
+ /* discard any leading slash */
+ i = 0;
+ while (fname[i] == '/')
+ i++;
+ strcpy(tmp, &fname[i]);
+ working_tmp[0] = '\0';
+ while ((c = (char *) strchr(tmp, '/'))) /* we are still walking directory components */
+ {
+ strncpy(working_tmp, tmp, c - tmp);
+ working_tmp[c - tmp] = '\0';
+ for (i = 0; i < strlen(c) - 1; i++)
+ tmp[i] = c[i + 1];
+ tmp[i] = '\0';
+ /* only a failure if we aren't looking at the top level */
+ if (!(pino = jffs2_1pass_find_inode(pL, working_tmp, pino)) &&
+ (working_tmp[0])) {
+ putstr("find_inode failed for name=");
+ putstr(working_tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ }
+
+ if (tmp[0] && !(pino = jffs2_1pass_find_inode(pL, tmp, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ /* this is for the bare filename, directories have already been mapped */
+ if (!(pino = jffs2_1pass_list_inodes(pL, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ return pino;
+
+}
+
+unsigned char
+jffs2_1pass_rescan_needed(struct part_info *part)
+{
+ struct b_node *b;
+ struct jffs2_unknown_node onode;
+ struct jffs2_unknown_node *node;
+ struct b_lists *pL = (struct b_lists *)part->jffs2_priv;
+
+ if (part->jffs2_priv == 0){
+ DEBUGF ("rescan: First time in use\n");
+ return 1;
+ }
+
+ /* if we have no list, we need to rescan */
+ if (pL->frag.listCount == 0) {
+ DEBUGF ("rescan: fraglist zero\n");
+ return 1;
+ }
+
+ /* but suppose someone reflashed a partition at the same offset... */
+ b = pL->dir.listHead;
+ while (b) {
+ node = (struct jffs2_unknown_node *) get_fl_mem(b->offset,
+ sizeof(onode), &onode);
+ if (node->nodetype != JFFS2_NODETYPE_DIRENT) {
+ DEBUGF ("rescan: fs changed beneath me? (%lx)\n",
+ (unsigned long) b->offset);
+ return 1;
+ }
+ b = b->next;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_JFFS2_SUMMARY
+static u32 sum_get_unaligned32(u32 *ptr)
+{
+ u32 val;
+ u8 *p = (u8 *)ptr;
+
+ val = *p | (*(p + 1) << 8) | (*(p + 2) << 16) | (*(p + 3) << 24);
+
+ return __le32_to_cpu(val);
+}
+
+static u16 sum_get_unaligned16(u16 *ptr)
+{
+ u16 val;
+ u8 *p = (u8 *)ptr;
+
+ val = *p | (*(p + 1) << 8);
+
+ return __le16_to_cpu(val);
+}
+
+#define dbg_summary(...) do {} while (0);
+/*
+ * Process the stored summary information - helper function for
+ * jffs2_sum_scan_sumnode()
+ */
+
+static int jffs2_sum_process_sum_data(struct part_info *part, uint32_t offset,
+ struct jffs2_raw_summary *summary,
+ struct b_lists *pL)
+{
+ void *sp;
+ int i, pass;
+ void *ret;
+
+ for (pass = 0; pass < 2; pass++) {
+ sp = summary->sum;
+
+ for (i = 0; i < summary->sum_num; i++) {
+ struct jffs2_sum_unknown_flash *spu = sp;
+ dbg_summary("processing summary index %d\n", i);
+
+ switch (sum_get_unaligned16(&spu->nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_flash *spi;
+ if (pass) {
+ spi = sp;
+
+ ret = insert_node(&pL->frag,
+ (u32)part->offset +
+ offset +
+ sum_get_unaligned32(
+ &spi->offset));
+ if (ret == NULL)
+ return -1;
+ }
+
+ sp += JFFS2_SUMMARY_INODE_SIZE;
+
+ break;
+ }
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_flash *spd;
+ spd = sp;
+ if (pass) {
+ ret = insert_node(&pL->dir,
+ (u32) part->offset +
+ offset +
+ sum_get_unaligned32(
+ &spd->offset));
+ if (ret == NULL)
+ return -1;
+ }
+
+ sp += JFFS2_SUMMARY_DIRENT_SIZE(
+ spd->nsize);
+
+ break;
+ }
+ default : {
+ uint16_t nodetype = sum_get_unaligned16(
+ &spu->nodetype);
+ printf("Unsupported node type %x found"
+ " in summary!\n",
+ nodetype);
+ if ((nodetype & JFFS2_COMPAT_MASK) ==
+ JFFS2_FEATURE_INCOMPAT)
+ return -EIO;
+ return -EBADMSG;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/* Process the summary node - called from jffs2_scan_eraseblock() */
+int jffs2_sum_scan_sumnode(struct part_info *part, uint32_t offset,
+ struct jffs2_raw_summary *summary, uint32_t sumsize,
+ struct b_lists *pL)
+{
+ struct jffs2_unknown_node crcnode;
+ int ret, ofs;
+ uint32_t crc;
+
+ ofs = part->sector_size - sumsize;
+
+ dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
+ offset, offset + ofs, sumsize);
+
+ /* OK, now check for node validity and CRC */
+ crcnode.magic = JFFS2_MAGIC_BITMASK;
+ crcnode.nodetype = JFFS2_NODETYPE_SUMMARY;
+ crcnode.totlen = summary->totlen;
+ crc = crc32_no_comp(0, (uchar *)&crcnode, sizeof(crcnode)-4);
+
+ if (summary->hdr_crc != crc) {
+ dbg_summary("Summary node header is corrupt (bad CRC or "
+ "no summary at all)\n");
+ goto crc_err;
+ }
+
+ if (summary->totlen != sumsize) {
+ dbg_summary("Summary node is corrupt (wrong erasesize?)\n");
+ goto crc_err;
+ }
+
+ crc = crc32_no_comp(0, (uchar *)summary,
+ sizeof(struct jffs2_raw_summary)-8);
+
+ if (summary->node_crc != crc) {
+ dbg_summary("Summary node is corrupt (bad CRC)\n");
+ goto crc_err;
+ }
+
+ crc = crc32_no_comp(0, (uchar *)summary->sum,
+ sumsize - sizeof(struct jffs2_raw_summary));
+
+ if (summary->sum_crc != crc) {
+ dbg_summary("Summary node data is corrupt (bad CRC)\n");
+ goto crc_err;
+ }
+
+ if (summary->cln_mkr)
+ dbg_summary("Summary : CLEANMARKER node \n");
+
+ ret = jffs2_sum_process_sum_data(part, offset, summary, pL);
+ if (ret == -EBADMSG)
+ return 0;
+ if (ret)
+ return ret; /* real error */
+
+ return 1;
+
+crc_err:
+ putstr("Summary node crc error, skipping summary information.\n");
+
+ return 0;
+}
+#endif /* CONFIG_JFFS2_SUMMARY */
+
+#ifdef DEBUG_FRAGMENTS
+static void
+dump_fragments(struct b_lists *pL)
+{
+ struct b_node *b;
+ struct jffs2_raw_inode ojNode;
+ struct jffs2_raw_inode *jNode;
+
+ putstr("\r\n\r\n******The fragment Entries******\r\n");
+ b = pL->frag.listHead;
+ while (b) {
+ jNode = (struct jffs2_raw_inode *) get_fl_mem(b->offset,
+ sizeof(ojNode), &ojNode);
+ putLabeledWord("\r\n\tbuild_list: FLASH_OFFSET = ", b->offset);
+ putLabeledWord("\tbuild_list: totlen = ", jNode->totlen);
+ putLabeledWord("\tbuild_list: inode = ", jNode->ino);
+ putLabeledWord("\tbuild_list: version = ", jNode->version);
+ putLabeledWord("\tbuild_list: isize = ", jNode->isize);
+ putLabeledWord("\tbuild_list: atime = ", jNode->atime);
+ putLabeledWord("\tbuild_list: offset = ", jNode->offset);
+ putLabeledWord("\tbuild_list: csize = ", jNode->csize);
+ putLabeledWord("\tbuild_list: dsize = ", jNode->dsize);
+ putLabeledWord("\tbuild_list: compr = ", jNode->compr);
+ putLabeledWord("\tbuild_list: usercompr = ", jNode->usercompr);
+ putLabeledWord("\tbuild_list: flags = ", jNode->flags);
+ putLabeledWord("\tbuild_list: offset = ", b->offset); /* FIXME: ? [RS] */
+ b = b->next;
+ }
+}
+#endif
+
+#ifdef DEBUG_DIRENTS
+static void
+dump_dirents(struct b_lists *pL)
+{
+ struct b_node *b;
+ struct jffs2_raw_dirent *jDir;
+
+ putstr("\r\n\r\n******The directory Entries******\r\n");
+ b = pL->dir.listHead;
+ while (b) {
+ jDir = (struct jffs2_raw_dirent *) get_node_mem(b->offset,
+ pL->readbuf);
+ putstr("\r\n");
+ putnstr(jDir->name, jDir->nsize);
+ putLabeledWord("\r\n\tbuild_list: magic = ", jDir->magic);
+ putLabeledWord("\tbuild_list: nodetype = ", jDir->nodetype);
+ putLabeledWord("\tbuild_list: hdr_crc = ", jDir->hdr_crc);
+ putLabeledWord("\tbuild_list: pino = ", jDir->pino);
+ putLabeledWord("\tbuild_list: version = ", jDir->version);
+ putLabeledWord("\tbuild_list: ino = ", jDir->ino);
+ putLabeledWord("\tbuild_list: mctime = ", jDir->mctime);
+ putLabeledWord("\tbuild_list: nsize = ", jDir->nsize);
+ putLabeledWord("\tbuild_list: type = ", jDir->type);
+ putLabeledWord("\tbuild_list: node_crc = ", jDir->node_crc);
+ putLabeledWord("\tbuild_list: name_crc = ", jDir->name_crc);
+ putLabeledWord("\tbuild_list: offset = ", b->offset); /* FIXME: ? [RS] */
+ b = b->next;
+ put_fl_mem(jDir, pL->readbuf);
+ }
+}
+#endif
+
+#define DEFAULT_EMPTY_SCAN_SIZE 4096
+
+static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size)
+{
+ if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
+ return sector_size;
+ else
+ return DEFAULT_EMPTY_SCAN_SIZE;
+}
+
+static u32
+jffs2_1pass_build_lists(struct part_info * part)
+{
+ struct b_lists *pL;
+ struct jffs2_unknown_node *node;
+ u32 nr_sectors;
+ u32 i;
+ u32 counter4 = 0;
+ u32 counterF = 0;
+ u32 counterN = 0;
+ u32 max_totlen = 0;
+ u32 buf_size = DEFAULT_EMPTY_SCAN_SIZE;
+ char *buf;
+
+ nr_sectors = lldiv(part->size, part->sector_size);
+ /* turn off the lcd. Refreshing the lcd adds 50% overhead to the */
+ /* jffs2 list building enterprise. Nope - in newer versions the overhead */
+ /* is only about 5%, not enough to inconvenience people for. */
+ /* lcd_off(); */
+
+ /* if we are building a list we need to refresh the cache. */
+ jffs_init_1pass_list(part);
+ pL = (struct b_lists *)part->jffs2_priv;
+ buf = malloc(buf_size);
+ puts ("Scanning JFFS2 FS: ");
+
+ /* start at the beginning of the partition */
+ for (i = 0; i < nr_sectors; i++) {
+ uint32_t sector_ofs = i * part->sector_size;
+ uint32_t buf_ofs = sector_ofs;
+ uint32_t buf_len;
+ uint32_t ofs, prevofs;
+#ifdef CONFIG_JFFS2_SUMMARY
+ struct jffs2_sum_marker *sm;
+ void *sumptr = NULL;
+ uint32_t sumlen;
+ int ret;
+#endif
+
+ WATCHDOG_RESET();
+
+#ifdef CONFIG_JFFS2_SUMMARY
+ buf_len = sizeof(*sm);
+
+ /* Read as much as we want into the _end_ of the preallocated
+ * buffer
+ */
+ get_fl_mem(part->offset + sector_ofs + part->sector_size -
+ buf_len, buf_len, buf + buf_size - buf_len);
+
+ sm = (void *)buf + buf_size - sizeof(*sm);
+ if (sm->magic == JFFS2_SUM_MAGIC) {
+ sumlen = part->sector_size - sm->offset;
+ sumptr = buf + buf_size - sumlen;
+
+ /* Now, make sure the summary itself is available */
+ if (sumlen > buf_size) {
+ /* Need to kmalloc for this. */
+ sumptr = malloc(sumlen);
+ if (!sumptr) {
+ putstr("Can't get memory for summary "
+ "node!\n");
+ free(buf);
+ jffs2_free_cache(part);
+ return 0;
+ }
+ memcpy(sumptr + sumlen - buf_len, buf +
+ buf_size - buf_len, buf_len);
+ }
+ if (buf_len < sumlen) {
+ /* Need to read more so that the entire summary
+ * node is present
+ */
+ get_fl_mem(part->offset + sector_ofs +
+ part->sector_size - sumlen,
+ sumlen - buf_len, sumptr);
+ }
+ }
+
+ if (sumptr) {
+ ret = jffs2_sum_scan_sumnode(part, sector_ofs, sumptr,
+ sumlen, pL);
+
+ if (buf_size && sumlen > buf_size)
+ free(sumptr);
+ if (ret < 0) {
+ free(buf);
+ jffs2_free_cache(part);
+ return 0;
+ }
+ if (ret)
+ continue;
+
+ }
+#endif /* CONFIG_JFFS2_SUMMARY */
+
+ buf_len = EMPTY_SCAN_SIZE(part->sector_size);
+
+ get_fl_mem((u32)part->offset + buf_ofs, buf_len, buf);
+
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+
+ /* Scan only 4KiB of 0xFF before declaring it empty */
+ while (ofs < EMPTY_SCAN_SIZE(part->sector_size) &&
+ *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
+ ofs += 4;
+
+ if (ofs == EMPTY_SCAN_SIZE(part->sector_size))
+ continue;
+
+ ofs += sector_ofs;
+ prevofs = ofs - 1;
+
+ scan_more:
+ while (ofs < sector_ofs + part->sector_size) {
+ if (ofs == prevofs) {
+ printf("offset %08x already seen, skip\n", ofs);
+ ofs += 4;
+ counter4++;
+ continue;
+ }
+ prevofs = ofs;
+ if (sector_ofs + part->sector_size <
+ ofs + sizeof(*node))
+ break;
+ if (buf_ofs + buf_len < ofs + sizeof(*node)) {
+ buf_len = min_t(uint32_t, buf_size, sector_ofs
+ + part->sector_size - ofs);
+ get_fl_mem((u32)part->offset + ofs, buf_len,
+ buf);
+ buf_ofs = ofs;
+ }
+
+ node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
+
+ if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
+ uint32_t inbuf_ofs;
+ uint32_t scan_end;
+
+ ofs += 4;
+ scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(
+ part->sector_size)/8,
+ buf_len);
+ more_empty:
+ inbuf_ofs = ofs - buf_ofs;
+ while (inbuf_ofs < scan_end) {
+ if (*(uint32_t *)(&buf[inbuf_ofs]) !=
+ 0xffffffff)
+ goto scan_more;
+
+ inbuf_ofs += 4;
+ ofs += 4;
+ }
+ /* Ran off end. */
+
+ /* See how much more there is to read in this
+ * eraseblock...
+ */
+ buf_len = min_t(uint32_t, buf_size,
+ sector_ofs +
+ part->sector_size - ofs);
+ if (!buf_len) {
+ /* No more to read. Break out of main
+ * loop without marking this range of
+ * empty space as dirty (because it's
+ * not)
+ */
+ break;
+ }
+ scan_end = buf_len;
+ get_fl_mem((u32)part->offset + ofs, buf_len,
+ buf);
+ buf_ofs = ofs;
+ goto more_empty;
+ }
+ if (node->magic != JFFS2_MAGIC_BITMASK ||
+ !hdr_crc(node)) {
+ ofs += 4;
+ counter4++;
+ continue;
+ }
+ if (ofs + node->totlen >
+ sector_ofs + part->sector_size) {
+ ofs += 4;
+ counter4++;
+ continue;
+ }
+ /* if it's a fragment, add it */
+ switch (node->nodetype) {
+ case JFFS2_NODETYPE_INODE:
+ if (buf_ofs + buf_len < ofs + sizeof(struct
+ jffs2_raw_inode)) {
+ get_fl_mem((u32)part->offset + ofs,
+ buf_len, buf);
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+ if (!inode_crc((struct jffs2_raw_inode *) node))
+ break;
+
+ if (insert_node(&pL->frag, (u32) part->offset +
+ ofs) == NULL) {
+ free(buf);
+ jffs2_free_cache(part);
+ return 0;
+ }
+ if (max_totlen < node->totlen)
+ max_totlen = node->totlen;
+ break;
+ case JFFS2_NODETYPE_DIRENT:
+ if (buf_ofs + buf_len < ofs + sizeof(struct
+ jffs2_raw_dirent) +
+ ((struct
+ jffs2_raw_dirent *)
+ node)->nsize) {
+ get_fl_mem((u32)part->offset + ofs,
+ buf_len, buf);
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+
+ if (!dirent_crc((struct jffs2_raw_dirent *)
+ node) ||
+ !dirent_name_crc(
+ (struct
+ jffs2_raw_dirent *)
+ node))
+ break;
+ if (! (counterN%100))
+ puts ("\b\b. ");
+ if (insert_node(&pL->dir, (u32) part->offset +
+ ofs) == NULL) {
+ free(buf);
+ jffs2_free_cache(part);
+ return 0;
+ }
+ if (max_totlen < node->totlen)
+ max_totlen = node->totlen;
+ counterN++;
+ break;
+ case JFFS2_NODETYPE_CLEANMARKER:
+ if (node->totlen != sizeof(struct jffs2_unknown_node))
+ printf("OOPS Cleanmarker has bad size "
+ "%d != %zu\n",
+ node->totlen,
+ sizeof(struct jffs2_unknown_node));
+ break;
+ case JFFS2_NODETYPE_PADDING:
+ if (node->totlen < sizeof(struct jffs2_unknown_node))
+ printf("OOPS Padding has bad size "
+ "%d < %zu\n",
+ node->totlen,
+ sizeof(struct jffs2_unknown_node));
+ break;
+ case JFFS2_NODETYPE_SUMMARY:
+ break;
+ default:
+ printf("Unknown node type: %x len %d offset 0x%x\n",
+ node->nodetype,
+ node->totlen, ofs);
+ }
+ ofs += ((node->totlen + 3) & ~3);
+ counterF++;
+ }
+ }
+
+ free(buf);
+ putstr("\b\b done.\r\n"); /* close off the dots */
+
+ /* We don't care if malloc failed - then each read operation will
+ * allocate its own buffer as necessary (NAND) or will read directly
+ * from flash (NOR).
+ */
+ pL->readbuf = malloc(max_totlen);
+
+ /* turn the lcd back on. */
+ /* splash(); */
+
+#if 0
+ putLabeledWord("dir entries = ", pL->dir.listCount);
+ putLabeledWord("frag entries = ", pL->frag.listCount);
+ putLabeledWord("+4 increments = ", counter4);
+ putLabeledWord("+file_offset increments = ", counterF);
+
+#endif
+
+#ifdef DEBUG_DIRENTS
+ dump_dirents(pL);
+#endif
+
+#ifdef DEBUG_FRAGMENTS
+ dump_fragments(pL);
+#endif
+
+ /* give visual feedback that we are done scanning the flash */
+ led_blink(0x0, 0x0, 0x1, 0x1); /* off, forever, on 100ms, off 100ms */
+ return 1;
+}
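Two small invariants of the scan loop above, shown as standalone sketches with hypothetical names: a sector is treated as empty after at most EMPTY_SCAN_SIZE() bytes of 0xFF, and after each node the offset advances to the next 32-bit boundary, because node lengths are padded to 4 bytes on flash.

static inline u32 next_node_ofs(u32 ofs, u32 totlen)
{
	return ofs + ((totlen + 3) & ~3);	/* round totlen up to a 4-byte boundary */
}

static inline int looks_erased(const char *buf, u32 scan_len)
{
	u32 ofs;

	for (ofs = 0; ofs < scan_len; ofs += 4)
		if (*(uint32_t *)(&buf[ofs]) != 0xFFFFFFFF)
			return 0;		/* found data: the sector is not empty */
	return 1;
}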
+
+
+static u32
+jffs2_1pass_fill_info(struct b_lists * pL, struct b_jffs2_info * piL)
+{
+ struct b_node *b;
+ struct jffs2_raw_inode ojNode;
+ struct jffs2_raw_inode *jNode;
+ int i;
+
+ for (i = 0; i < JFFS2_NUM_COMPR; i++) {
+ piL->compr_info[i].num_frags = 0;
+ piL->compr_info[i].compr_sum = 0;
+ piL->compr_info[i].decompr_sum = 0;
+ }
+
+ b = pL->frag.listHead;
+ while (b) {
+ jNode = (struct jffs2_raw_inode *) get_fl_mem(b->offset,
+ sizeof(ojNode), &ojNode);
+ if (jNode->compr < JFFS2_NUM_COMPR) {
+ piL->compr_info[jNode->compr].num_frags++;
+ piL->compr_info[jNode->compr].compr_sum += jNode->csize;
+ piL->compr_info[jNode->compr].decompr_sum += jNode->dsize;
+ }
+ b = b->next;
+ }
+ return 0;
+}
+
+
+static struct b_lists *
+jffs2_get_list(struct part_info * part, const char *who)
+{
+ /* copy requested part_info struct pointer to global location */
+ current_part = part;
+
+ if (jffs2_1pass_rescan_needed(part)) {
+ if (!jffs2_1pass_build_lists(part)) {
+ printf("%s: Failed to scan JFFSv2 file structure\n", who);
+ return NULL;
+ }
+ }
+ return (struct b_lists *)part->jffs2_priv;
+}
+
+
+/* Print directory / file contents */
+u32
+jffs2_1pass_ls(struct part_info * part, const char *fname)
+{
+ struct b_lists *pl;
+ long ret = 1;
+ u32 inode;
+
+ if (! (pl = jffs2_get_list(part, "ls")))
+ return 0;
+
+ if (! (inode = jffs2_1pass_search_list_inodes(pl, fname, 1))) {
+ putstr("ls: Failed to scan jffs2 file structure\r\n");
+ return 0;
+ }
+
+
+#if 0
+ putLabeledWord("found file at inode = ", inode);
+ putLabeledWord("read_inode returns = ", ret);
+#endif
+
+ return ret;
+}
+
+
+/* Load a file from flash into memory. fname can be a full path */
+u32
+jffs2_1pass_load(char *dest, struct part_info * part, const char *fname)
+{
+
+ struct b_lists *pl;
+ long ret = 1;
+ u32 inode;
+
+ if (! (pl = jffs2_get_list(part, "load")))
+ return 0;
+
+ if (! (inode = jffs2_1pass_search_inode(pl, fname, 1))) {
+ putstr("load: Failed to find inode\r\n");
+ return 0;
+ }
+
+ /* Resolve symlinks */
+ if (! (inode = jffs2_1pass_resolve_inode(pl, inode))) {
+ putstr("load: Failed to resolve inode structure\r\n");
+ return 0;
+ }
+
+ if ((ret = jffs2_1pass_read_inode(pl, inode, dest)) < 0) {
+ putstr("load: Failed to read inode\r\n");
+ return 0;
+ }
+
+ DEBUGF ("load: loaded '%s' to 0x%lx (%ld bytes)\n", fname,
+ (unsigned long) dest, ret);
+ return ret;
+}
+
+/* Return information about the fs on this partition */
+u32
+jffs2_1pass_info(struct part_info * part)
+{
+ struct b_jffs2_info info;
+ struct b_lists *pl;
+ int i;
+
+ if (! (pl = jffs2_get_list(part, "info")))
+ return 0;
+
+ jffs2_1pass_fill_info(pl, &info);
+ for (i = 0; i < JFFS2_NUM_COMPR; i++) {
+ printf ("Compression: %s\n"
+ "\tfrag count: %d\n"
+ "\tcompressed sum: %d\n"
+ "\tuncompressed sum: %d\n",
+ compr_names[i],
+ info.compr_info[i].num_frags,
+ info.compr_info[i].compr_sum,
+ info.compr_info[i].decompr_sum);
+ }
+ return 1;
+}
diff --git a/qemu/roms/u-boot/fs/jffs2/jffs2_nand_1pass.c b/qemu/roms/u-boot/fs/jffs2/jffs2_nand_1pass.c
new file mode 100644
index 000000000..740f787dd
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/jffs2_nand_1pass.c
@@ -0,0 +1,1030 @@
+#include <common.h>
+
+#include <malloc.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+
+#include <jffs2/jffs2.h>
+#include <jffs2/jffs2_1pass.h>
+#include <nand.h>
+
+#include "jffs2_nand_private.h"
+
+#define NODE_CHUNK 1024 /* size of memory allocation chunk in b_nodes */
+
+/* Debugging switches */
+#undef DEBUG_DIRENTS /* print directory entry list after scan */
+#undef DEBUG_FRAGMENTS /* print fragment list after scan */
+#undef DEBUG /* enable debugging messages */
+
+#ifdef DEBUG
+# define DEBUGF(fmt,args...) printf(fmt ,##args)
+#else
+# define DEBUGF(fmt,args...)
+#endif
+
+static nand_info_t *nand;
+
+/* Compression names */
+static char *compr_names[] = {
+ "NONE",
+ "ZERO",
+ "RTIME",
+ "RUBINMIPS",
+ "COPY",
+ "DYNRUBIN",
+ "ZLIB",
+#if defined(CONFIG_JFFS2_LZO)
+ "LZO",
+#endif
+};
+
+/* Spinning wheel */
+static char spinner[] = { '|', '/', '-', '\\' };
+
+/* Memory management */
+struct mem_block {
+ unsigned index;
+ struct mem_block *next;
+ char nodes[0];
+};
+
+static void
+free_nodes(struct b_list *list)
+{
+ while (list->listMemBase != NULL) {
+ struct mem_block *next = list->listMemBase->next;
+ free(list->listMemBase);
+ list->listMemBase = next;
+ }
+}
+
+static struct b_node *
+add_node(struct b_list *list, int size)
+{
+ u32 index = 0;
+ struct mem_block *memBase;
+ struct b_node *b;
+
+ memBase = list->listMemBase;
+ if (memBase != NULL)
+ index = memBase->index;
+
+ if (memBase == NULL || index >= NODE_CHUNK) {
+ /* we need more space before we continue */
+ memBase = mmalloc(sizeof(struct mem_block) + NODE_CHUNK * size);
+ if (memBase == NULL) {
+ putstr("add_node: malloc failed\n");
+ return NULL;
+ }
+ memBase->next = list->listMemBase;
+ index = 0;
+ }
+ /* now we have room to add it. */
+ b = (struct b_node *)&memBase->nodes[size * index];
+ index ++;
+
+ memBase->index = index;
+ list->listMemBase = memBase;
+ list->listCount++;
+ return b;
+}
+
+static struct b_node *
+insert_node(struct b_list *list, struct b_node *new)
+{
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ struct b_node *b, *prev;
+
+ if (list->listTail != NULL && list->listCompare(new, list->listTail))
+ prev = list->listTail;
+ else if (list->listLast != NULL && list->listCompare(new, list->listLast))
+ prev = list->listLast;
+ else
+ prev = NULL;
+
+ for (b = (prev ? prev->next : list->listHead);
+ b != NULL && list->listCompare(new, b);
+ prev = b, b = b->next) {
+ list->listLoops++;
+ }
+ if (b != NULL)
+ list->listLast = prev;
+
+ if (b != NULL) {
+ new->next = b;
+ if (prev != NULL)
+ prev->next = new;
+ else
+ list->listHead = new;
+ } else
+#endif
+ {
+ new->next = (struct b_node *) NULL;
+ if (list->listTail != NULL) {
+ list->listTail->next = new;
+ list->listTail = new;
+ } else {
+ list->listTail = list->listHead = new;
+ }
+ }
+
+ return new;
+}
+
+static struct b_node *
+insert_inode(struct b_list *list, struct jffs2_raw_inode *node, u32 offset)
+{
+ struct b_inode *new;
+
+ if (!(new = (struct b_inode *)add_node(list, sizeof(struct b_inode)))) {
+ putstr("add_node failed!\r\n");
+ return NULL;
+ }
+ new->offset = offset;
+ new->version = node->version;
+ new->ino = node->ino;
+ new->isize = node->isize;
+ new->csize = node->csize;
+
+ return insert_node(list, (struct b_node *)new);
+}
+
+static struct b_node *
+insert_dirent(struct b_list *list, struct jffs2_raw_dirent *node, u32 offset)
+{
+ struct b_dirent *new;
+
+ if (!(new = (struct b_dirent *)add_node(list, sizeof(struct b_dirent)))) {
+ putstr("add_node failed!\r\n");
+ return NULL;
+ }
+ new->offset = offset;
+ new->version = node->version;
+ new->pino = node->pino;
+ new->ino = node->ino;
+ new->nhash = full_name_hash(node->name, node->nsize);
+ new->nsize = node->nsize;
+ new->type = node->type;
+
+ return insert_node(list, (struct b_node *)new);
+}
+
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+/* Sort data entries with the latest version last, so that if there
+ * is overlapping data the latest version will be used.
+ */
+static int compare_inodes(struct b_node *new, struct b_node *old)
+{
+ struct jffs2_raw_inode ojNew;
+ struct jffs2_raw_inode ojOld;
+ struct jffs2_raw_inode *jNew =
+ (struct jffs2_raw_inode *)get_fl_mem(new->offset, sizeof(ojNew), &ojNew);
+ struct jffs2_raw_inode *jOld =
+ (struct jffs2_raw_inode *)get_fl_mem(old->offset, sizeof(ojOld), &ojOld);
+
+ return jNew->version > jOld->version;
+}
+
+/* Sort directory entries so all entries in the same directory
+ * with the same name are grouped together, with the latest version
+ * last. This makes it easy to eliminate all but the latest version
+ * by marking the previous version dead by setting the inode to 0.
+ */
+static int compare_dirents(struct b_node *new, struct b_node *old)
+{
+ struct jffs2_raw_dirent ojNew;
+ struct jffs2_raw_dirent ojOld;
+ struct jffs2_raw_dirent *jNew =
+ (struct jffs2_raw_dirent *)get_fl_mem(new->offset, sizeof(ojNew), &ojNew);
+ struct jffs2_raw_dirent *jOld =
+ (struct jffs2_raw_dirent *)get_fl_mem(old->offset, sizeof(ojOld), &ojOld);
+ int cmp;
+
+ /* ascending sort by pino */
+ if (jNew->pino != jOld->pino)
+ return jNew->pino > jOld->pino;
+
+ /* pino is the same, so use ascending sort by nsize, so
+ * we don't do strncmp unless we really must.
+ */
+ if (jNew->nsize != jOld->nsize)
+ return jNew->nsize > jOld->nsize;
+
+ /* length is also the same, so use ascending sort by name
+ */
+ cmp = strncmp(jNew->name, jOld->name, jNew->nsize);
+ if (cmp != 0)
+ return cmp > 0;
+
+ /* we have duplicate names in this directory, so use ascending
+ * sort by version
+ */
+ if (jNew->version > jOld->version) {
+ /* since jNew is newer, we know jOld is not valid, so
+ * mark it with inode 0 and it will not be used
+ */
+ jOld->ino = 0;
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+static u32
+jffs_init_1pass_list(struct part_info *part)
+{
+ struct b_lists *pL;
+
+ if (part->jffs2_priv != NULL) {
+ pL = (struct b_lists *)part->jffs2_priv;
+ free_nodes(&pL->frag);
+ free_nodes(&pL->dir);
+ free(pL);
+ }
+ if (NULL != (part->jffs2_priv = malloc(sizeof(struct b_lists)))) {
+ pL = (struct b_lists *)part->jffs2_priv;
+
+ memset(pL, 0, sizeof(*pL));
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ pL->dir.listCompare = compare_dirents;
+ pL->frag.listCompare = compare_inodes;
+#endif
+ }
+ return 0;
+}
+
+/* read the data of an inode (copy it to dest and/or fill in *stat); returns the file size */
+static long
+jffs2_1pass_read_inode(struct b_lists *pL, u32 ino, char *dest,
+ struct stat *stat)
+{
+ struct b_inode *jNode;
+ u32 totalSize = 0;
+ u32 latestVersion = 0;
+ long ret;
+
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ /* Find file size before loading any data, so fragments that
+ * start past the end of file can be ignored. A fragment
+ * that is partially in the file is loaded, so extra data may
+ * be loaded up to the next 4K boundary above the file size.
+ * This shouldn't cause trouble when loading kernel images, so
+ * we will live with it.
+ */
+ for (jNode = (struct b_inode *)pL->frag.listHead; jNode; jNode = jNode->next) {
+ if ((ino == jNode->ino)) {
+ /* get actual file length from the newest node */
+ if (jNode->version >= latestVersion) {
+ totalSize = jNode->isize;
+ latestVersion = jNode->version;
+ }
+ }
+ }
+#endif
+
+ for (jNode = (struct b_inode *)pL->frag.listHead; jNode; jNode = jNode->next) {
+ if ((ino != jNode->ino))
+ continue;
+#ifndef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ /* get actual file length from the newest node */
+ if (jNode->version >= latestVersion) {
+ totalSize = jNode->isize;
+ latestVersion = jNode->version;
+ }
+#endif
+ if (dest || stat) {
+ char *src, *dst;
+ char data[4096 + sizeof(struct jffs2_raw_inode)];
+ struct jffs2_raw_inode *inode;
+ size_t len;
+
+ inode = (struct jffs2_raw_inode *)&data;
+ len = sizeof(struct jffs2_raw_inode);
+ if (dest)
+ len += jNode->csize;
+ nand_read(nand, jNode->offset, &len, inode);
+ /* ignore data behind latest known EOF */
+ if (inode->offset > totalSize)
+ continue;
+
+ if (stat) {
+ stat->st_mtime = inode->mtime;
+ stat->st_mode = inode->mode;
+ stat->st_ino = inode->ino;
+ stat->st_size = totalSize;
+ }
+
+ if (!dest)
+ continue;
+
+ src = ((char *) inode) + sizeof(struct jffs2_raw_inode);
+ dst = (char *) (dest + inode->offset);
+
+ switch (inode->compr) {
+ case JFFS2_COMPR_NONE:
+ ret = 0;
+ memcpy(dst, src, inode->dsize);
+ break;
+ case JFFS2_COMPR_ZERO:
+ ret = 0;
+ memset(dst, 0, inode->dsize);
+ break;
+ case JFFS2_COMPR_RTIME:
+ ret = 0;
+ rtime_decompress(src, dst, inode->csize, inode->dsize);
+ break;
+ case JFFS2_COMPR_DYNRUBIN:
+ /* this is slow but it works */
+ ret = 0;
+ dynrubin_decompress(src, dst, inode->csize, inode->dsize);
+ break;
+ case JFFS2_COMPR_ZLIB:
+ ret = zlib_decompress(src, dst, inode->csize, inode->dsize);
+ break;
+#if defined(CONFIG_JFFS2_LZO)
+ case JFFS2_COMPR_LZO:
+ ret = lzo_decompress(src, dst, inode->csize, inode->dsize);
+ break;
+#endif
+ default:
+ /* unknown */
+ putLabeledWord("UNKNOWN COMPRESSION METHOD = ", inode->compr);
+ return -1;
+ }
+ }
+ }
+
+ return totalSize;
+}
+
+/* find the inode from the slashless name given a parent */
+static u32
+jffs2_1pass_find_inode(struct b_lists * pL, const char *name, u32 pino)
+{
+ struct b_dirent *jDir;
+ int len = strlen(name); /* name is assumed slash free */
+ unsigned int nhash = full_name_hash(name, len);
+ u32 version = 0;
+ u32 inode = 0;
+
+ /* we need to search all and return the inode with the highest version */
+ for (jDir = (struct b_dirent *)pL->dir.listHead; jDir; jDir = jDir->next) {
+ if ((pino == jDir->pino) && (jDir->ino) && /* 0 for unlink */
+ (len == jDir->nsize) && (nhash == jDir->nhash)) {
+ /* TODO: compare name */
+ if (jDir->version < version)
+ continue;
+
+ if (jDir->version == version && inode != 0) {
+ /* I'm pretty sure this isn't legal */
+ putstr(" ** ERROR ** ");
+/* putnstr(jDir->name, jDir->nsize); */
+/* putLabeledWord(" has dup version =", version); */
+ }
+ inode = jDir->ino;
+ version = jDir->version;
+ }
+ }
+ return inode;
+}
+
+char *mkmodestr(unsigned long mode, char *str)
+{
+ static const char *l = "xwr";
+ int mask = 1, i;
+ char c;
+
+ switch (mode & S_IFMT) {
+ case S_IFDIR: str[0] = 'd'; break;
+ case S_IFBLK: str[0] = 'b'; break;
+ case S_IFCHR: str[0] = 'c'; break;
+ case S_IFIFO: str[0] = 'f'; break;
+ case S_IFLNK: str[0] = 'l'; break;
+ case S_IFSOCK: str[0] = 's'; break;
+ case S_IFREG: str[0] = '-'; break;
+ default: str[0] = '?';
+ }
+
+ for(i = 0; i < 9; i++) {
+ c = l[i%3];
+ str[9-i] = (mode & mask)?c:'-';
+ mask = mask<<1;
+ }
+
+ if(mode & S_ISUID) str[3] = (mode & S_IXUSR)?'s':'S';
+ if(mode & S_ISGID) str[6] = (mode & S_IXGRP)?'s':'S';
+ if(mode & S_ISVTX) str[9] = (mode & S_IXOTH)?'t':'T';
+ str[10] = '\0';
+ return str;
+}
+
+static inline void dump_stat(struct stat *st, const char *name)
+{
+ char str[20];
+ char s[64], *p;
+
+ if (st->st_mtime == (time_t)(-1)) /* some ctimes really hate -1 */
+ st->st_mtime = 1;
+
+ ctime_r(&st->st_mtime, s/*,64*/); /* newlib ctime doesn't have buflen */
+
+ if ((p = strchr(s,'\n')) != NULL) *p = '\0';
+ if ((p = strchr(s,'\r')) != NULL) *p = '\0';
+
+/*
+ printf("%6lo %s %8ld %s %s\n", st->st_mode, mkmodestr(st->st_mode, str),
+ st->st_size, s, name);
+*/
+
+ printf(" %s %8ld %s %s", mkmodestr(st->st_mode,str), st->st_size, s, name);
+}
+
+static inline int
+dump_inode(struct b_lists *pL, struct b_dirent *d, struct b_inode *i)
+{
+ char fname[JFFS2_MAX_NAME_LEN + 1];
+ struct stat st;
+ size_t len;
+
+ if(!d || !i) return -1;
+ len = d->nsize;
+ nand_read(nand, d->offset + sizeof(struct jffs2_raw_dirent),
+ &len, &fname);
+ fname[d->nsize] = '\0';
+
+ memset(&st, 0, sizeof(st));
+
+ jffs2_1pass_read_inode(pL, i->ino, NULL, &st);
+
+ dump_stat(&st, fname);
+/* FIXME
+ if (d->type == DT_LNK) {
+ unsigned char *src = (unsigned char *) (&i[1]);
+ putstr(" -> ");
+ putnstr(src, (int)i->dsize);
+ }
+*/
+ putstr("\r\n");
+
+ return 0;
+}
+
+/* list inodes with the given pino */
+static u32
+jffs2_1pass_list_inodes(struct b_lists * pL, u32 pino)
+{
+ struct b_dirent *jDir;
+ u32 i_version = 0;
+
+ for (jDir = (struct b_dirent *)pL->dir.listHead; jDir; jDir = jDir->next) {
+ if ((pino == jDir->pino) && (jDir->ino)) { /* ino=0 -> unlink */
+ struct b_inode *jNode = (struct b_inode *)pL->frag.listHead;
+ struct b_inode *i = NULL;
+
+ while (jNode) {
+ if (jNode->ino == jDir->ino && jNode->version >= i_version) {
+ i_version = jNode->version;
+ i = jNode;
+ }
+ jNode = jNode->next;
+ }
+ dump_inode(pL, jDir, i);
+ }
+ }
+ return pino;
+}
+
+static u32
+jffs2_1pass_search_inode(struct b_lists * pL, const char *fname, u32 pino)
+{
+ int i;
+ char tmp[256];
+ char working_tmp[256];
+ char *c;
+
+ /* discard any leading slash */
+ i = 0;
+ while (fname[i] == '/')
+ i++;
+ strcpy(tmp, &fname[i]);
+
+	while ((c = (char *) strchr(tmp, '/'))) /* we are still searching through directories */
+ {
+ strncpy(working_tmp, tmp, c - tmp);
+ working_tmp[c - tmp] = '\0';
+#if 0
+ putstr("search_inode: tmp = ");
+ putstr(tmp);
+ putstr("\r\n");
+ putstr("search_inode: wtmp = ");
+ putstr(working_tmp);
+ putstr("\r\n");
+ putstr("search_inode: c = ");
+ putstr(c);
+ putstr("\r\n");
+#endif
+ for (i = 0; i < strlen(c) - 1; i++)
+ tmp[i] = c[i + 1];
+ tmp[i] = '\0';
+#if 0
+ putstr("search_inode: post tmp = ");
+ putstr(tmp);
+ putstr("\r\n");
+#endif
+
+ if (!(pino = jffs2_1pass_find_inode(pL, working_tmp, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(working_tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ }
+ /* this is for the bare filename, directories have already been mapped */
+ if (!(pino = jffs2_1pass_find_inode(pL, tmp, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ return pino;
+
+}
+
+static u32
+jffs2_1pass_resolve_inode(struct b_lists * pL, u32 ino)
+{
+ struct b_dirent *jDir;
+ struct b_inode *jNode;
+ u8 jDirFoundType = 0;
+ u32 jDirFoundIno = 0;
+ u32 jDirFoundPino = 0;
+ char tmp[JFFS2_MAX_NAME_LEN + 1];
+ u32 version = 0;
+ u32 pino;
+
+ /* we need to search all and return the inode with the highest version */
+ for (jDir = (struct b_dirent *)pL->dir.listHead; jDir; jDir = jDir->next) {
+ if (ino == jDir->ino) {
+ if (jDir->version < version)
+ continue;
+
+ if (jDir->version == version && jDirFoundType) {
+ /* I'm pretty sure this isn't legal */
+ putstr(" ** ERROR ** ");
+/* putnstr(jDir->name, jDir->nsize); */
+/* putLabeledWord(" has dup version (resolve) = ", */
+/* version); */
+ }
+
+ jDirFoundType = jDir->type;
+ jDirFoundIno = jDir->ino;
+ jDirFoundPino = jDir->pino;
+ version = jDir->version;
+ }
+ }
+ /* now we found the right entry again. (shoulda returned inode*) */
+ if (jDirFoundType != DT_LNK)
+ return jDirFoundIno;
+
+ /* it's a soft link so we follow it again. */
+ for (jNode = (struct b_inode *)pL->frag.listHead; jNode; jNode = jNode->next) {
+ if (jNode->ino == jDirFoundIno) {
+ size_t len = jNode->csize;
+ nand_read(nand, jNode->offset + sizeof(struct jffs2_raw_inode), &len, &tmp);
+ tmp[jNode->csize] = '\0';
+ break;
+ }
+ }
+ /* ok so the name of the new file to find is in tmp */
+ /* if it starts with a slash it is root based else shared dirs */
+ if (tmp[0] == '/')
+ pino = 1;
+ else
+ pino = jDirFoundPino;
+
+ return jffs2_1pass_search_inode(pL, tmp, pino);
+}
+
+static u32
+jffs2_1pass_search_list_inodes(struct b_lists * pL, const char *fname, u32 pino)
+{
+ int i;
+ char tmp[256];
+ char working_tmp[256];
+ char *c;
+
+ /* discard any leading slash */
+ i = 0;
+ while (fname[i] == '/')
+ i++;
+ strcpy(tmp, &fname[i]);
+ working_tmp[0] = '\0';
+	while ((c = (char *) strchr(tmp, '/'))) /* we are still searching through directories */
+ {
+ strncpy(working_tmp, tmp, c - tmp);
+ working_tmp[c - tmp] = '\0';
+ for (i = 0; i < strlen(c) - 1; i++)
+ tmp[i] = c[i + 1];
+ tmp[i] = '\0';
+		/* only a failure if we aren't looking at the top level */
+ if (!(pino = jffs2_1pass_find_inode(pL, working_tmp, pino)) &&
+ (working_tmp[0])) {
+ putstr("find_inode failed for name=");
+ putstr(working_tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ }
+
+ if (tmp[0] && !(pino = jffs2_1pass_find_inode(pL, tmp, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ /* this is for the bare filename, directories have already been mapped */
+ if (!(pino = jffs2_1pass_list_inodes(pL, pino))) {
+ putstr("find_inode failed for name=");
+ putstr(tmp);
+ putstr("\r\n");
+ return 0;
+ }
+ return pino;
+
+}
+
+unsigned char
+jffs2_1pass_rescan_needed(struct part_info *part)
+{
+ struct b_node *b;
+ struct jffs2_unknown_node onode;
+ struct jffs2_unknown_node *node;
+ struct b_lists *pL = (struct b_lists *)part->jffs2_priv;
+
+ if (part->jffs2_priv == 0){
+ DEBUGF ("rescan: First time in use\n");
+ return 1;
+ }
+ /* if we have no list, we need to rescan */
+ if (pL->frag.listCount == 0) {
+ DEBUGF ("rescan: fraglist zero\n");
+ return 1;
+ }
+
+ /* or if we are scanning a new partition */
+ if (pL->partOffset != part->offset) {
+ DEBUGF ("rescan: different partition\n");
+ return 1;
+ }
+
+ /* FIXME */
+#if 0
+ /* but suppose someone reflashed a partition at the same offset... */
+ b = pL->dir.listHead;
+ while (b) {
+ node = (struct jffs2_unknown_node *) get_fl_mem(b->offset,
+ sizeof(onode), &onode);
+ if (node->nodetype != JFFS2_NODETYPE_DIRENT) {
+ DEBUGF ("rescan: fs changed beneath me? (%lx)\n",
+ (unsigned long) b->offset);
+ return 1;
+ }
+ b = b->next;
+ }
+#endif
+ return 0;
+}
+
+#ifdef DEBUG_FRAGMENTS
+static void
+dump_fragments(struct b_lists *pL)
+{
+ struct b_node *b;
+ struct jffs2_raw_inode ojNode;
+ struct jffs2_raw_inode *jNode;
+
+ putstr("\r\n\r\n******The fragment Entries******\r\n");
+ b = pL->frag.listHead;
+ while (b) {
+ jNode = (struct jffs2_raw_inode *) get_fl_mem(b->offset,
+ sizeof(ojNode), &ojNode);
+ putLabeledWord("\r\n\tbuild_list: FLASH_OFFSET = ", b->offset);
+ putLabeledWord("\tbuild_list: totlen = ", jNode->totlen);
+ putLabeledWord("\tbuild_list: inode = ", jNode->ino);
+ putLabeledWord("\tbuild_list: version = ", jNode->version);
+ putLabeledWord("\tbuild_list: isize = ", jNode->isize);
+ putLabeledWord("\tbuild_list: atime = ", jNode->atime);
+ putLabeledWord("\tbuild_list: offset = ", jNode->offset);
+ putLabeledWord("\tbuild_list: csize = ", jNode->csize);
+ putLabeledWord("\tbuild_list: dsize = ", jNode->dsize);
+ putLabeledWord("\tbuild_list: compr = ", jNode->compr);
+ putLabeledWord("\tbuild_list: usercompr = ", jNode->usercompr);
+ putLabeledWord("\tbuild_list: flags = ", jNode->flags);
+ putLabeledWord("\tbuild_list: offset = ", b->offset); /* FIXME: ? [RS] */
+ b = b->next;
+ }
+}
+#endif
+
+#ifdef DEBUG_DIRENTS
+static void
+dump_dirents(struct b_lists *pL)
+{
+ struct b_node *b;
+ struct jffs2_raw_dirent *jDir;
+
+ putstr("\r\n\r\n******The directory Entries******\r\n");
+ b = pL->dir.listHead;
+ while (b) {
+ jDir = (struct jffs2_raw_dirent *) get_node_mem(b->offset);
+ putstr("\r\n");
+ putnstr(jDir->name, jDir->nsize);
+ putLabeledWord("\r\n\tbuild_list: magic = ", jDir->magic);
+ putLabeledWord("\tbuild_list: nodetype = ", jDir->nodetype);
+ putLabeledWord("\tbuild_list: hdr_crc = ", jDir->hdr_crc);
+ putLabeledWord("\tbuild_list: pino = ", jDir->pino);
+ putLabeledWord("\tbuild_list: version = ", jDir->version);
+ putLabeledWord("\tbuild_list: ino = ", jDir->ino);
+ putLabeledWord("\tbuild_list: mctime = ", jDir->mctime);
+ putLabeledWord("\tbuild_list: nsize = ", jDir->nsize);
+ putLabeledWord("\tbuild_list: type = ", jDir->type);
+ putLabeledWord("\tbuild_list: node_crc = ", jDir->node_crc);
+ putLabeledWord("\tbuild_list: name_crc = ", jDir->name_crc);
+ putLabeledWord("\tbuild_list: offset = ", b->offset); /* FIXME: ? [RS] */
+ b = b->next;
+ put_fl_mem(jDir);
+ }
+}
+#endif
+
+static int
+jffs2_fill_scan_buf(nand_info_t *nand, unsigned char *buf,
+ unsigned ofs, unsigned len)
+{
+ int ret;
+ unsigned olen;
+
+ olen = len;
+ ret = nand_read(nand, ofs, &olen, buf);
+ if (ret) {
+ printf("nand_read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret);
+ return ret;
+ }
+ if (olen < len) {
+ printf("Read at 0x%x gave only 0x%x bytes\n", ofs, olen);
+ return -1;
+ }
+ return 0;
+}
+
+#define EMPTY_SCAN_SIZE 1024
+static u32
+jffs2_1pass_build_lists(struct part_info * part)
+{
+ struct b_lists *pL;
+ struct jffs2_unknown_node *node;
+ unsigned nr_blocks, sectorsize, ofs, offset;
+ char *buf;
+ int i;
+ u32 counter = 0;
+ u32 counter4 = 0;
+ u32 counterF = 0;
+ u32 counterN = 0;
+
+ struct mtdids *id = part->dev->id;
+ nand = nand_info + id->num;
+
+ /* if we are building a list we need to refresh the cache. */
+ jffs_init_1pass_list(part);
+ pL = (struct b_lists *)part->jffs2_priv;
+ pL->partOffset = part->offset;
+ puts ("Scanning JFFS2 FS: ");
+
+ sectorsize = nand->erasesize;
+ nr_blocks = part->size / sectorsize;
+ buf = malloc(sectorsize);
+ if (!buf)
+ return 0;
+
+ for (i = 0; i < nr_blocks; i++) {
+ printf("\b\b%c ", spinner[counter++ % sizeof(spinner)]);
+
+ offset = part->offset + i * sectorsize;
+
+ if (nand_block_isbad(nand, offset))
+ continue;
+
+ if (jffs2_fill_scan_buf(nand, buf, offset, EMPTY_SCAN_SIZE))
+ return 0;
+
+ ofs = 0;
+		/* Scan only the first EMPTY_SCAN_SIZE bytes (1 KiB) of 0xFF before declaring the block empty */
+ while (ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
+ ofs += 4;
+ if (ofs == EMPTY_SCAN_SIZE)
+ continue;
+
+ if (jffs2_fill_scan_buf(nand, buf + EMPTY_SCAN_SIZE, offset + EMPTY_SCAN_SIZE, sectorsize - EMPTY_SCAN_SIZE))
+ return 0;
+ offset += ofs;
+
+ while (ofs < sectorsize - sizeof(struct jffs2_unknown_node)) {
+ node = (struct jffs2_unknown_node *)&buf[ofs];
+ if (node->magic != JFFS2_MAGIC_BITMASK || !hdr_crc(node)) {
+ offset += 4;
+ ofs += 4;
+ counter4++;
+ continue;
+ }
+ /* if its a fragment add it */
+ if (node->nodetype == JFFS2_NODETYPE_INODE &&
+ inode_crc((struct jffs2_raw_inode *) node)) {
+ if (insert_inode(&pL->frag, (struct jffs2_raw_inode *) node,
+ offset) == NULL) {
+ return 0;
+ }
+ } else if (node->nodetype == JFFS2_NODETYPE_DIRENT &&
+ dirent_crc((struct jffs2_raw_dirent *) node) &&
+ dirent_name_crc((struct jffs2_raw_dirent *) node)) {
+ if (! (counterN%100))
+ puts ("\b\b. ");
+ if (insert_dirent(&pL->dir, (struct jffs2_raw_dirent *) node,
+ offset) == NULL) {
+ return 0;
+ }
+ counterN++;
+ } else if (node->nodetype == JFFS2_NODETYPE_CLEANMARKER) {
+ if (node->totlen != sizeof(struct jffs2_unknown_node))
+ printf("OOPS Cleanmarker has bad size "
+ "%d != %zu\n",
+ node->totlen,
+ sizeof(struct jffs2_unknown_node));
+ } else if (node->nodetype == JFFS2_NODETYPE_PADDING) {
+ if (node->totlen < sizeof(struct jffs2_unknown_node))
+ printf("OOPS Padding has bad size "
+ "%d < %zu\n",
+ node->totlen,
+ sizeof(struct jffs2_unknown_node));
+ } else {
+ printf("Unknown node type: %x len %d offset 0x%x\n",
+ node->nodetype,
+ node->totlen, offset);
+ }
+ offset += ((node->totlen + 3) & ~3);
+ ofs += ((node->totlen + 3) & ~3);
+ counterF++;
+ }
+ }
+
+ putstr("\b\b done.\r\n"); /* close off the dots */
+
+#if 0
+ putLabeledWord("dir entries = ", pL->dir.listCount);
+ putLabeledWord("frag entries = ", pL->frag.listCount);
+ putLabeledWord("+4 increments = ", counter4);
+ putLabeledWord("+file_offset increments = ", counterF);
+#endif
+
+#ifdef DEBUG_DIRENTS
+ dump_dirents(pL);
+#endif
+
+#ifdef DEBUG_FRAGMENTS
+ dump_fragments(pL);
+#endif
+
+ /* give visual feedback that we are done scanning the flash */
+ led_blink(0x0, 0x0, 0x1, 0x1); /* off, forever, on 100ms, off 100ms */
+ free(buf);
+
+ return 1;
+}
+
+
+static u32
+jffs2_1pass_fill_info(struct b_lists * pL, struct b_jffs2_info * piL)
+{
+ struct b_node *b;
+ struct jffs2_raw_inode ojNode;
+ struct jffs2_raw_inode *jNode;
+ int i;
+
+ for (i = 0; i < JFFS2_NUM_COMPR; i++) {
+ piL->compr_info[i].num_frags = 0;
+ piL->compr_info[i].compr_sum = 0;
+ piL->compr_info[i].decompr_sum = 0;
+ }
+/* FIXME
+ b = pL->frag.listHead;
+ while (b) {
+ jNode = (struct jffs2_raw_inode *) get_fl_mem(b->offset,
+ sizeof(ojNode), &ojNode);
+ if (jNode->compr < JFFS2_NUM_COMPR) {
+ piL->compr_info[jNode->compr].num_frags++;
+ piL->compr_info[jNode->compr].compr_sum += jNode->csize;
+ piL->compr_info[jNode->compr].decompr_sum += jNode->dsize;
+ }
+ b = b->next;
+ }
+*/
+ return 0;
+}
+
+
+static struct b_lists *
+jffs2_get_list(struct part_info * part, const char *who)
+{
+ if (jffs2_1pass_rescan_needed(part)) {
+ if (!jffs2_1pass_build_lists(part)) {
+ printf("%s: Failed to scan JFFSv2 file structure\n", who);
+ return NULL;
+ }
+ }
+ return (struct b_lists *)part->jffs2_priv;
+}
+
+
+/* Print directory / file contents */
+u32
+jffs2_1pass_ls(struct part_info * part, const char *fname)
+{
+ struct b_lists *pl;
+ long ret = 0;
+ u32 inode;
+
+ if (! (pl = jffs2_get_list(part, "ls")))
+ return 0;
+
+ if (! (inode = jffs2_1pass_search_list_inodes(pl, fname, 1))) {
+ putstr("ls: Failed to scan jffs2 file structure\r\n");
+ return 0;
+ }
+
+#if 0
+ putLabeledWord("found file at inode = ", inode);
+ putLabeledWord("read_inode returns = ", ret);
+#endif
+
+ return ret;
+}
+
+
+/* Load a file from flash into memory. fname can be a full path */
+u32
+jffs2_1pass_load(char *dest, struct part_info * part, const char *fname)
+{
+
+ struct b_lists *pl;
+ long ret = 0;
+ u32 inode;
+
+ if (! (pl = jffs2_get_list(part, "load")))
+ return 0;
+
+ if (! (inode = jffs2_1pass_search_inode(pl, fname, 1))) {
+ putstr("load: Failed to find inode\r\n");
+ return 0;
+ }
+
+ /* Resolve symlinks */
+ if (! (inode = jffs2_1pass_resolve_inode(pl, inode))) {
+ putstr("load: Failed to resolve inode structure\r\n");
+ return 0;
+ }
+
+ if ((ret = jffs2_1pass_read_inode(pl, inode, dest, NULL)) < 0) {
+ putstr("load: Failed to read inode\r\n");
+ return 0;
+ }
+
+ DEBUGF ("load: loaded '%s' to 0x%lx (%ld bytes)\n", fname,
+ (unsigned long) dest, ret);
+ return ret;
+}
+
+/* Return information about the fs on this partition */
+u32
+jffs2_1pass_info(struct part_info * part)
+{
+ struct b_jffs2_info info;
+ struct b_lists *pl;
+ int i;
+
+ if (! (pl = jffs2_get_list(part, "info")))
+ return 0;
+
+ jffs2_1pass_fill_info(pl, &info);
+ for (i = 0; i < JFFS2_NUM_COMPR; i++) {
+ printf ("Compression: %s\n"
+ "\tfrag count: %d\n"
+ "\tcompressed sum: %d\n"
+ "\tuncompressed sum: %d\n",
+ compr_names[i],
+ info.compr_info[i].num_frags,
+ info.compr_info[i].compr_sum,
+ info.compr_info[i].decompr_sum);
+ }
+ return 1;
+}
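A quick illustration of the mkmodestr() helper defined earlier in this file: it renders a mode word as the familiar ls-style string and needs an 11-byte buffer (ten characters plus the terminating NUL). The snippet below is a minimal hosted-build sketch for illustration only, assuming the host's <sys/stat.h> defines S_IFDIR with the usual value:

    #include <stdio.h>
    #include <sys/stat.h>

    /* mkmodestr() as defined in jffs2_nand_1pass.c above: it fills
     * str[0..9] with the type and permission characters and writes a
     * NUL at str[10], so the buffer must hold at least 11 bytes. */
    extern char *mkmodestr(unsigned long mode, char *str);

    int main(void)
    {
            char buf[11];

            /* A directory with 0755 permissions renders as "drwxr-xr-x". */
            printf("%s\n", mkmodestr(S_IFDIR | 0755, buf));
            return 0;
    }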
diff --git a/qemu/roms/u-boot/fs/jffs2/jffs2_nand_private.h b/qemu/roms/u-boot/fs/jffs2/jffs2_nand_private.h
new file mode 100644
index 000000000..18cca8d07
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/jffs2_nand_private.h
@@ -0,0 +1,133 @@
+#ifndef jffs2_private_h
+#define jffs2_private_h
+
+#include <jffs2/jffs2.h>
+
+struct b_node {
+ struct b_node *next;
+};
+
+struct b_inode {
+ struct b_inode *next;
+ u32 offset; /* physical offset to beginning of real inode */
+ u32 version;
+ u32 ino;
+ u32 isize;
+ u32 csize;
+};
+
+struct b_dirent {
+ struct b_dirent *next;
+ u32 offset; /* physical offset to beginning of real dirent */
+ u32 version;
+ u32 pino;
+ u32 ino;
+ unsigned int nhash;
+ unsigned char nsize;
+ unsigned char type;
+};
+
+struct b_list {
+ struct b_node *listTail;
+ struct b_node *listHead;
+ unsigned int listCount;
+ struct mem_block *listMemBase;
+};
+
+struct b_lists {
+ char *partOffset;
+ struct b_list dir;
+ struct b_list frag;
+};
+
+struct b_compr_info {
+ u32 num_frags;
+ u32 compr_sum;
+ u32 decompr_sum;
+};
+
+struct b_jffs2_info {
+ struct b_compr_info compr_info[JFFS2_NUM_COMPR];
+};
+
+static inline int
+hdr_crc(struct jffs2_unknown_node *node)
+{
+#if 1
+ u32 crc = crc32_no_comp(0, (unsigned char *)node, sizeof(struct jffs2_unknown_node) - 4);
+#else
+ /* what's the semantics of this? why is this here? */
+ u32 crc = crc32_no_comp(~0, (unsigned char *)node, sizeof(struct jffs2_unknown_node) - 4);
+
+ crc ^= ~0;
+#endif
+ if (node->hdr_crc != crc) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+dirent_crc(struct jffs2_raw_dirent *node)
+{
+ if (node->node_crc != crc32_no_comp(0, (unsigned char *)node, sizeof(struct jffs2_raw_dirent) - 8)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+dirent_name_crc(struct jffs2_raw_dirent *node)
+{
+ if (node->name_crc != crc32_no_comp(0, (unsigned char *)&(node->name), node->nsize)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+inode_crc(struct jffs2_raw_inode *node)
+{
+ if (node->node_crc != crc32_no_comp(0, (unsigned char *)node, sizeof(struct jffs2_raw_inode) - 8)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+/* Borrowed from include/linux/dcache.h */
+
+/* Name hashing routines. Initial hash value */
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash() 0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+ return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+ return (unsigned int) hash;
+}
+
+/* Compute the hash for a name string. */
+static inline unsigned int
+full_name_hash(const unsigned char *name, unsigned int len)
+{
+ unsigned long hash = init_name_hash();
+ while (len--)
+ hash = partial_name_hash(*name++, hash);
+ return end_name_hash(hash);
+}
+
+#endif /* jffs2_private.h */
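The b_dirent records built by the NAND scanner keep only nsize and the full_name_hash() of each name, so directory lookups can discard non-matching entries without re-reading the name from flash (see jffs2_1pass_find_inode() in jffs2_nand_1pass.c above). A self-contained sketch of that filter, with the hash helpers copied from this header; the file names are made up for the example:

    #include <stdio.h>
    #include <string.h>

    /* Hash helpers as defined in jffs2_nand_private.h above. */
    static unsigned long partial_name_hash(unsigned long c, unsigned long prev)
    {
            return (prev + (c << 4) + (c >> 4)) * 11;
    }

    static unsigned int full_name_hash(const unsigned char *name, unsigned int len)
    {
            unsigned long hash = 0;                 /* init_name_hash() */
            while (len--)
                    hash = partial_name_hash(*name++, hash);
            return (unsigned int)hash;              /* end_name_hash() */
    }

    int main(void)
    {
            const char *wanted = "uImage";
            const char *entry  = "uImage.bak";
            unsigned int want_hash = full_name_hash((const unsigned char *)wanted,
                                                    strlen(wanted));

            /* The scanner only treats a dirent as a candidate when both the
             * stored name length and the stored hash match the lookup key. */
            int candidate = strlen(entry) == strlen(wanted) &&
                            full_name_hash((const unsigned char *)entry,
                                           strlen(entry)) == want_hash;

            printf("\"%s\" matches \"%s\"? %s\n", entry, wanted,
                   candidate ? "maybe" : "no");
            return 0;
    }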
diff --git a/qemu/roms/u-boot/fs/jffs2/jffs2_private.h b/qemu/roms/u-boot/fs/jffs2/jffs2_private.h
new file mode 100644
index 000000000..658b32521
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/jffs2_private.h
@@ -0,0 +1,101 @@
+#ifndef jffs2_private_h
+#define jffs2_private_h
+
+#include <jffs2/jffs2.h>
+
+
+struct b_node {
+ u32 offset;
+ struct b_node *next;
+ enum { CRC_UNKNOWN = 0, CRC_OK, CRC_BAD } datacrc;
+};
+
+struct b_list {
+ struct b_node *listTail;
+ struct b_node *listHead;
+#ifdef CONFIG_SYS_JFFS2_SORT_FRAGMENTS
+ struct b_node *listLast;
+ int (*listCompare)(struct b_node *new, struct b_node *node);
+ u32 listLoops;
+#endif
+ u32 listCount;
+ struct mem_block *listMemBase;
+};
+
+struct b_lists {
+ struct b_list dir;
+ struct b_list frag;
+ void *readbuf;
+};
+
+struct b_compr_info {
+ u32 num_frags;
+ u32 compr_sum;
+ u32 decompr_sum;
+};
+
+struct b_jffs2_info {
+ struct b_compr_info compr_info[JFFS2_NUM_COMPR];
+};
+
+static inline int
+hdr_crc(struct jffs2_unknown_node *node)
+{
+#if 1
+ u32 crc = crc32_no_comp(0, (unsigned char *)node, sizeof(struct jffs2_unknown_node) - 4);
+#else
+ /* what's the semantics of this? why is this here? */
+ u32 crc = crc32_no_comp(~0, (unsigned char *)node, sizeof(struct jffs2_unknown_node) - 4);
+
+ crc ^= ~0;
+#endif
+ if (node->hdr_crc != crc) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+dirent_crc(struct jffs2_raw_dirent *node)
+{
+ if (node->node_crc != crc32_no_comp(0, (unsigned char *)node, sizeof(struct jffs2_raw_dirent) - 8)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+dirent_name_crc(struct jffs2_raw_dirent *node)
+{
+ if (node->name_crc != crc32_no_comp(0, (unsigned char *)&(node->name), node->nsize)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+inode_crc(struct jffs2_raw_inode *node)
+{
+ if (node->node_crc != crc32_no_comp(0, (unsigned char *)node, sizeof(struct jffs2_raw_inode) - 8)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static inline int
+data_crc(struct jffs2_raw_inode *node)
+{
+ if (node->data_crc != crc32_no_comp(0, (unsigned char *)
+ ((int) &node->node_crc + sizeof (node->node_crc)),
+ node->csize)) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+#endif /* jffs2_private.h */
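The CRC helpers above all follow the same pattern: recompute crc32_no_comp() over the bytes that precede the stored CRC word (the header check skips its own trailing 4 bytes, the dirent/inode checks skip the trailing pair of CRC words, and the name/data checks cover the payload that follows). A hedged sketch of how a scanner can chain them for a directory entry, mirroring the checks done in the build-list loops; the helper name is illustrative, not part of the patch:

    /* Sketch only: validate one raw dirent that has already been copied
     * into RAM, the same way the flash/NAND scan loops do. */
    #include <jffs2/jffs2.h>
    #include "jffs2_private.h"

    static int dirent_is_valid(struct jffs2_raw_dirent *d)
    {
            /* hdr_crc() covers the common node header minus its own CRC;
             * dirent_crc() covers the fixed part of the dirent up to its
             * two CRC words; dirent_name_crc() covers the name bytes. */
            if (!hdr_crc((struct jffs2_unknown_node *)d))
                    return 0;
            if (d->nodetype != JFFS2_NODETYPE_DIRENT)
                    return 0;
            return dirent_crc(d) && dirent_name_crc(d);
    }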
diff --git a/qemu/roms/u-boot/fs/jffs2/mini_inflate.c b/qemu/roms/u-boot/fs/jffs2/mini_inflate.c
new file mode 100644
index 000000000..2f13412f9
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/mini_inflate.c
@@ -0,0 +1,377 @@
+/*-------------------------------------------------------------------------
+ * Filename: mini_inflate.c
+ * Version: $Id: mini_inflate.c,v 1.3 2002/01/24 22:58:42 rfeany Exp $
+ * Copyright: Copyright (C) 2001, Russ Dill
+ * Author: Russ Dill <Russ.Dill@asu.edu>
+ * Description: Mini inflate implementation (RFC 1951)
+ *-----------------------------------------------------------------------*/
+/*
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <config.h>
+#include <jffs2/mini_inflate.h>
+
+/* The order that the code lengths in section 3.2.7 are in */
+static unsigned char huffman_order[] = {16, 17, 18, 0, 8, 7, 9, 6, 10, 5,
+ 11, 4, 12, 3, 13, 2, 14, 1, 15};
+
+inline void cramfs_memset(int *s, const int c, size n)
+{
+ n--;
+ for (;n > 0; n--) s[n] = c;
+ s[0] = c;
+}
+
+/* associate a stream with a block of data and reset the stream */
+static void init_stream(struct bitstream *stream, unsigned char *data,
+ void *(*inflate_memcpy)(void *, const void *, size))
+{
+ stream->error = NO_ERROR;
+ stream->memcpy = inflate_memcpy;
+ stream->decoded = 0;
+ stream->data = data;
+ stream->bit = 0; /* The first bit of the stream is the lsb of the
+ * first byte */
+
+ /* really sorry about all this initialization, think of a better way,
+ * let me know and it will get cleaned up */
+ stream->codes.bits = 8;
+ stream->codes.num_symbols = 19;
+ stream->codes.lengths = stream->code_lengths;
+ stream->codes.symbols = stream->code_symbols;
+ stream->codes.count = stream->code_count;
+ stream->codes.first = stream->code_first;
+ stream->codes.pos = stream->code_pos;
+
+ stream->lengths.bits = 16;
+ stream->lengths.num_symbols = 288;
+ stream->lengths.lengths = stream->length_lengths;
+ stream->lengths.symbols = stream->length_symbols;
+ stream->lengths.count = stream->length_count;
+ stream->lengths.first = stream->length_first;
+ stream->lengths.pos = stream->length_pos;
+
+ stream->distance.bits = 16;
+ stream->distance.num_symbols = 32;
+ stream->distance.lengths = stream->distance_lengths;
+ stream->distance.symbols = stream->distance_symbols;
+ stream->distance.count = stream->distance_count;
+ stream->distance.first = stream->distance_first;
+ stream->distance.pos = stream->distance_pos;
+
+}
+
+/* pull 'bits' bits out of the stream. The last bit pulled is returned as the
+ * msb. (section 3.1.1)
+ */
+inline unsigned long pull_bits(struct bitstream *stream,
+ const unsigned int bits)
+{
+ unsigned long ret;
+ int i;
+
+ ret = 0;
+ for (i = 0; i < bits; i++) {
+ ret += ((*(stream->data) >> stream->bit) & 1) << i;
+
+ /* if, before incrementing, we are on bit 7,
+ * go to the lsb of the next byte */
+ if (stream->bit++ == 7) {
+ stream->bit = 0;
+ stream->data++;
+ }
+ }
+ return ret;
+}
+
+inline int pull_bit(struct bitstream *stream)
+{
+ int ret = ((*(stream->data) >> stream->bit) & 1);
+ if (stream->bit++ == 7) {
+ stream->bit = 0;
+ stream->data++;
+ }
+ return ret;
+}
+
+/* discard bits up to the next whole byte */
+static void discard_bits(struct bitstream *stream)
+{
+ if (stream->bit != 0) {
+ stream->bit = 0;
+ stream->data++;
+ }
+}
+
+/* No decompression, the data is all literals (section 3.2.4) */
+static void decompress_none(struct bitstream *stream, unsigned char *dest)
+{
+ unsigned int length;
+
+ discard_bits(stream);
+ length = *(stream->data++);
+ length += *(stream->data++) << 8;
+ pull_bits(stream, 16); /* throw away the inverse of the size */
+
+ stream->decoded += length;
+ stream->memcpy(dest, stream->data, length);
+ stream->data += length;
+}
+
+/* Read in a symbol from the stream (section 3.2.2) */
+static int read_symbol(struct bitstream *stream, struct huffman_set *set)
+{
+ int bits = 0;
+ int code = 0;
+ while (!(set->count[bits] && code < set->first[bits] +
+ set->count[bits])) {
+ code = (code << 1) + pull_bit(stream);
+ if (++bits > set->bits) {
+ /* error decoding (corrupted data?) */
+ stream->error = CODE_NOT_FOUND;
+ return -1;
+ }
+ }
+ return set->symbols[set->pos[bits] + code - set->first[bits]];
+}
+
+/* decompress a stream of data encoded with the passed length and distance
+ * huffman codes */
+static void decompress_huffman(struct bitstream *stream, unsigned char *dest)
+{
+ struct huffman_set *lengths = &(stream->lengths);
+ struct huffman_set *distance = &(stream->distance);
+
+ int symbol, length, dist, i;
+
+ do {
+ if ((symbol = read_symbol(stream, lengths)) < 0) return;
+ if (symbol < 256) {
+ *(dest++) = symbol; /* symbol is a literal */
+ stream->decoded++;
+ } else if (symbol > 256) {
+			/* Determine the length of the repetition
+ * (section 3.2.5) */
+ if (symbol < 265) length = symbol - 254;
+ else if (symbol == 285) length = 258;
+ else {
+ length = pull_bits(stream, (symbol - 261) >> 2);
+ length += (4 << ((symbol - 261) >> 2)) + 3;
+ length += ((symbol - 1) % 4) <<
+ ((symbol - 261) >> 2);
+ }
+
+ /* Determine how far back to go */
+ if ((symbol = read_symbol(stream, distance)) < 0)
+ return;
+ if (symbol < 4) dist = symbol + 1;
+ else {
+ dist = pull_bits(stream, (symbol - 2) >> 1);
+ dist += (2 << ((symbol - 2) >> 1)) + 1;
+ dist += (symbol % 2) << ((symbol - 2) >> 1);
+ }
+ stream->decoded += length;
+ for (i = 0; i < length; i++) {
+ *dest = dest[-dist];
+ dest++;
+ }
+ }
+ } while (symbol != 256); /* 256 is the end of the data block */
+}
+
+/* Fill the lookup tables (section 3.2.2) */
+static void fill_code_tables(struct huffman_set *set)
+{
+ int code = 0, i, length;
+
+ /* fill in the first code of each bit length, and the pos pointer */
+ set->pos[0] = 0;
+ for (i = 1; i < set->bits; i++) {
+ code = (code + set->count[i - 1]) << 1;
+ set->first[i] = code;
+ set->pos[i] = set->pos[i - 1] + set->count[i - 1];
+ }
+
+ /* Fill in the table of symbols in order of their huffman code */
+ for (i = 0; i < set->num_symbols; i++) {
+ if ((length = set->lengths[i]))
+ set->symbols[set->pos[length]++] = i;
+ }
+
+ /* reset the pos pointer */
+ for (i = 1; i < set->bits; i++) set->pos[i] -= set->count[i];
+}
+
+static void init_code_tables(struct huffman_set *set)
+{
+ cramfs_memset(set->lengths, 0, set->num_symbols);
+ cramfs_memset(set->count, 0, set->bits);
+ cramfs_memset(set->first, 0, set->bits);
+}
+
+/* read in the huffman codes for dynamic decoding (section 3.2.7) */
+static void decompress_dynamic(struct bitstream *stream, unsigned char *dest)
+{
+ /* I tried my best to minimize the memory footprint here, while still
+ * keeping up performance. I really dislike the _lengths[] tables, but
+ * I see no way of eliminating them without a sizable performance
+ * impact. The first struct table keeps track of stats on each bit
+ * length. The _length table keeps a record of the bit length of each
+ * symbol. The _symbols table is for looking up symbols by the huffman
+ * code (the pos element points to the first place in the symbol table
+	 * where that bit length occurs). I also hate the initialization of these
+ * structs, if someone knows how to compact these, lemme know. */
+
+ struct huffman_set *codes = &(stream->codes);
+ struct huffman_set *lengths = &(stream->lengths);
+ struct huffman_set *distance = &(stream->distance);
+
+ int hlit = pull_bits(stream, 5) + 257;
+ int hdist = pull_bits(stream, 5) + 1;
+ int hclen = pull_bits(stream, 4) + 4;
+ int length, curr_code, symbol, i, last_code;
+
+ last_code = 0;
+
+ init_code_tables(codes);
+ init_code_tables(lengths);
+ init_code_tables(distance);
+
+	/* fill in the count of each bit length, as well as the lengths
+ * table */
+ for (i = 0; i < hclen; i++) {
+ length = pull_bits(stream, 3);
+ codes->lengths[huffman_order[i]] = length;
+ if (length) codes->count[length]++;
+
+ }
+ fill_code_tables(codes);
+
+	/* Do the same for the length codes, being careful of the wrap-through
+ * to the distance table */
+ curr_code = 0;
+ while (curr_code < hlit) {
+ if ((symbol = read_symbol(stream, codes)) < 0) return;
+ if (symbol == 0) {
+ curr_code++;
+ last_code = 0;
+ } else if (symbol < 16) { /* Literal length */
+ lengths->lengths[curr_code] = last_code = symbol;
+ lengths->count[symbol]++;
+ curr_code++;
+ } else if (symbol == 16) { /* repeat the last symbol 3 - 6
+ * times */
+ length = 3 + pull_bits(stream, 2);
+ for (;length; length--, curr_code++)
+ if (curr_code < hlit) {
+ lengths->lengths[curr_code] =
+ last_code;
+ lengths->count[last_code]++;
+ } else { /* wrap to the distance table */
+ distance->lengths[curr_code - hlit] =
+ last_code;
+ distance->count[last_code]++;
+ }
+ } else if (symbol == 17) { /* repeat a bit length 0 */
+ curr_code += 3 + pull_bits(stream, 3);
+ last_code = 0;
+ } else { /* same, but more times */
+ curr_code += 11 + pull_bits(stream, 7);
+ last_code = 0;
+ }
+ }
+ fill_code_tables(lengths);
+
+ /* Fill the distance table, don't need to worry about wrapthrough
+ * here */
+ curr_code -= hlit;
+ while (curr_code < hdist) {
+ if ((symbol = read_symbol(stream, codes)) < 0) return;
+ if (symbol == 0) {
+ curr_code++;
+ last_code = 0;
+ } else if (symbol < 16) {
+ distance->lengths[curr_code] = last_code = symbol;
+ distance->count[symbol]++;
+ curr_code++;
+ } else if (symbol == 16) {
+ length = 3 + pull_bits(stream, 2);
+ for (;length; length--, curr_code++) {
+ distance->lengths[curr_code] =
+ last_code;
+ distance->count[last_code]++;
+ }
+ } else if (symbol == 17) {
+ curr_code += 3 + pull_bits(stream, 3);
+ last_code = 0;
+ } else {
+ curr_code += 11 + pull_bits(stream, 7);
+ last_code = 0;
+ }
+ }
+ fill_code_tables(distance);
+
+ decompress_huffman(stream, dest);
+}
+
+/* fill in the length and distance huffman codes for fixed encoding
+ * (section 3.2.6) */
+static void decompress_fixed(struct bitstream *stream, unsigned char *dest)
+{
+ /* let gcc fill in the initial values */
+ struct huffman_set *lengths = &(stream->lengths);
+ struct huffman_set *distance = &(stream->distance);
+
+ cramfs_memset(lengths->count, 0, 16);
+ cramfs_memset(lengths->first, 0, 16);
+ cramfs_memset(lengths->lengths, 8, 144);
+ cramfs_memset(lengths->lengths + 144, 9, 112);
+ cramfs_memset(lengths->lengths + 256, 7, 24);
+ cramfs_memset(lengths->lengths + 280, 8, 8);
+ lengths->count[7] = 24;
+ lengths->count[8] = 152;
+ lengths->count[9] = 112;
+
+ cramfs_memset(distance->count, 0, 16);
+ cramfs_memset(distance->first, 0, 16);
+ cramfs_memset(distance->lengths, 5, 32);
+ distance->count[5] = 32;
+
+
+ fill_code_tables(lengths);
+ fill_code_tables(distance);
+
+
+ decompress_huffman(stream, dest);
+}
+
+/* returns the number of bytes decoded, < 0 if there was an error. Note that
+ * this function assumes that the block starts on a byte boundary
+ * (non-compliant, but I don't see where this would happen). section 3.2.3 */
+long decompress_block(unsigned char *dest, unsigned char *source,
+ void *(*inflate_memcpy)(void *, const void *, size))
+{
+ int bfinal, btype;
+ struct bitstream stream;
+
+ init_stream(&stream, source, inflate_memcpy);
+ do {
+ bfinal = pull_bit(&stream);
+ btype = pull_bits(&stream, 2);
+ if (btype == NO_COMP) decompress_none(&stream, dest + stream.decoded);
+ else if (btype == DYNAMIC_COMP)
+ decompress_dynamic(&stream, dest + stream.decoded);
+ else if (btype == FIXED_COMP) decompress_fixed(&stream, dest + stream.decoded);
+ else stream.error = COMP_UNKNOWN;
+ } while (!bfinal && !stream.error);
+
+#if 0
+ putstr("decompress_block start\r\n");
+ putLabeledWord("stream.error = ",stream.error);
+ putLabeledWord("stream.decoded = ",stream.decoded);
+ putLabeledWord("dest = ",dest);
+ putstr("decompress_block end\r\n");
+#endif
+ return stream.error ? -stream.error : stream.decoded;
+}
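decompress_block() delegates the actual byte copying to the caller, which lets the cramfs side plug in whatever copy routine fits its environment. A minimal caller sketch, assuming the `size` typedef and the decompress_block() prototype are exported by <jffs2/mini_inflate.h> exactly as this file uses them; the wrapper names are illustrative:

    #include <common.h>
    #include <jffs2/mini_inflate.h>

    /* Plain memcpy wrapper with the signature decompress_block() expects. */
    static void *inflate_memcpy(void *dst, const void *src, size n)
    {
            return memcpy(dst, src, n);
    }

    /* Inflate one deflate stream; returns the number of decoded bytes,
     * or -stream.error (negative) as documented above decompress_block(). */
    long inflate_into(unsigned char *dst, unsigned char *src)
    {
            return decompress_block(dst, src, inflate_memcpy);
    }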
diff --git a/qemu/roms/u-boot/fs/jffs2/summary.h b/qemu/roms/u-boot/fs/jffs2/summary.h
new file mode 100644
index 000000000..834933cd1
--- /dev/null
+++ b/qemu/roms/u-boot/fs/jffs2/summary.h
@@ -0,0 +1,163 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * Zoltan Sogor <weth@inf.u-szeged.hu>,
+ * Patrik Kluba <pajko@halom.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+
+#ifndef JFFS2_SUMMARY_H
+#define JFFS2_SUMMARY_H
+
+#define BLK_STATE_ALLFF 0
+#define BLK_STATE_CLEAN 1
+#define BLK_STATE_PARTDIRTY 2
+#define BLK_STATE_CLEANMARKER 3
+#define BLK_STATE_ALLDIRTY 4
+#define BLK_STATE_BADBLOCK 5
+
+#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff
+#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash))
+#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x))
+#define JFFS2_SUMMARY_XATTR_SIZE (sizeof(struct jffs2_sum_xattr_flash))
+#define JFFS2_SUMMARY_XREF_SIZE (sizeof(struct jffs2_sum_xref_flash))
+
+/* Summary structures used on flash */
+
+struct jffs2_sum_unknown_flash
+{
+ __u16 nodetype; /* node type */
+};
+
+struct jffs2_sum_inode_flash
+{
+ __u16 nodetype; /* node type */
+ __u32 inode; /* inode number */
+ __u32 version; /* inode version */
+ __u32 offset; /* offset on jeb */
+ __u32 totlen; /* record length */
+} __attribute__((packed));
+
+struct jffs2_sum_dirent_flash
+{
+ __u16 nodetype; /* == JFFS_NODETYPE_DIRENT */
+ __u32 totlen; /* record length */
+ __u32 offset; /* offset on jeb */
+ __u32 pino; /* parent inode */
+ __u32 version; /* dirent version */
+ __u32 ino; /* == zero for unlink */
+ uint8_t nsize; /* dirent name size */
+ uint8_t type; /* dirent type */
+ uint8_t name[0]; /* dirent name */
+} __attribute__((packed));
+
+struct jffs2_sum_xattr_flash
+{
+	__u16 nodetype; /* == JFFS2_NODETYPE_XATTR */
+ __u32 xid; /* xattr identifier */
+ __u32 version; /* version number */
+ __u32 offset; /* offset on jeb */
+ __u32 totlen; /* node length */
+} __attribute__((packed));
+
+struct jffs2_sum_xref_flash
+{
+ __u16 nodetype; /* == JFFS2_NODETYPE_XREF */
+ __u32 offset; /* offset on jeb */
+} __attribute__((packed));
+
+union jffs2_sum_flash
+{
+ struct jffs2_sum_unknown_flash u;
+ struct jffs2_sum_inode_flash i;
+ struct jffs2_sum_dirent_flash d;
+ struct jffs2_sum_xattr_flash x;
+ struct jffs2_sum_xref_flash r;
+};
+
+/* Summary structures used in memory */
+
+struct jffs2_sum_unknown_mem
+{
+ union jffs2_sum_mem *next;
+ __u16 nodetype; /* node type */
+};
+
+struct jffs2_sum_inode_mem
+{
+ union jffs2_sum_mem *next;
+ __u16 nodetype; /* node type */
+ __u32 inode; /* inode number */
+ __u32 version; /* inode version */
+ __u32 offset; /* offset on jeb */
+ __u32 totlen; /* record length */
+} __attribute__((packed));
+
+struct jffs2_sum_dirent_mem
+{
+ union jffs2_sum_mem *next;
+ __u16 nodetype; /* == JFFS_NODETYPE_DIRENT */
+ __u32 totlen; /* record length */
+	__u32 offset; /* offset on jeb */
+ __u32 pino; /* parent inode */
+ __u32 version; /* dirent version */
+ __u32 ino; /* == zero for unlink */
+ uint8_t nsize; /* dirent name size */
+ uint8_t type; /* dirent type */
+ uint8_t name[0]; /* dirent name */
+} __attribute__((packed));
+
+struct jffs2_sum_xattr_mem
+{
+ union jffs2_sum_mem *next;
+ __u16 nodetype;
+ __u32 xid;
+ __u32 version;
+ __u32 offset;
+ __u32 totlen;
+} __attribute__((packed));
+
+struct jffs2_sum_xref_mem
+{
+ union jffs2_sum_mem *next;
+ __u16 nodetype;
+ __u32 offset;
+} __attribute__((packed));
+
+union jffs2_sum_mem
+{
+ struct jffs2_sum_unknown_mem u;
+ struct jffs2_sum_inode_mem i;
+ struct jffs2_sum_dirent_mem d;
+ struct jffs2_sum_xattr_mem x;
+ struct jffs2_sum_xref_mem r;
+};
+
+/* Summary related information stored in superblock */
+
+struct jffs2_summary
+{
+ uint32_t sum_size; /* collected summary information for nextblock */
+ uint32_t sum_num;
+ uint32_t sum_padded;
+ union jffs2_sum_mem *sum_list_head;
+ union jffs2_sum_mem *sum_list_tail;
+
+ __u32 *sum_buf; /* buffer for writing out summary */
+};
+
+/* Summary marker is stored at the end of every summarized erase block */
+
+struct jffs2_sum_marker
+{
+ __u32 offset; /* offset of the summary node in the jeb */
+ __u32 magic; /* == JFFS2_SUM_MAGIC */
+};
+
+#define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker))
+
+#endif /* JFFS2_SUMMARY_H */
diff --git a/qemu/roms/u-boot/fs/reiserfs/Makefile b/qemu/roms/u-boot/fs/reiserfs/Makefile
new file mode 100644
index 000000000..5a692f0ee
--- /dev/null
+++ b/qemu/roms/u-boot/fs/reiserfs/Makefile
@@ -0,0 +1,12 @@
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# (C) Copyright 2003
+# Pavel Bartusek, Sysgo Real-Time Solutions AG, pba@sysgo.de
+#
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := reiserfs.o dev.o mode_string.o
diff --git a/qemu/roms/u-boot/fs/reiserfs/dev.c b/qemu/roms/u-boot/fs/reiserfs/dev.c
new file mode 100644
index 000000000..68255458d
--- /dev/null
+++ b/qemu/roms/u-boot/fs/reiserfs/dev.c
@@ -0,0 +1,98 @@
+/*
+ * (C) Copyright 2003 - 2004
+ * Sysgo AG, <www.elinos.com>, Pavel Bartusek <pba@sysgo.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+
+#include <common.h>
+#include <config.h>
+#include <reiserfs.h>
+
+#include "reiserfs_private.h"
+
+static block_dev_desc_t *reiserfs_block_dev_desc;
+static disk_partition_t *part_info;
+
+
+void reiserfs_set_blk_dev(block_dev_desc_t *rbdd, disk_partition_t *info)
+{
+ reiserfs_block_dev_desc = rbdd;
+ part_info = info;
+}
+
+
+int reiserfs_devread (int sector, int byte_offset, int byte_len, char *buf)
+{
+ char sec_buf[SECTOR_SIZE];
+ unsigned block_len;
+/*
+ unsigned len = byte_len;
+ u8 *start = buf;
+*/
+ /*
+ * Check partition boundaries
+ */
+ if (sector < 0
+ || ((sector + ((byte_offset + byte_len - 1) >> SECTOR_BITS))
+ >= part_info->size)) {
+/* errnum = ERR_OUTSIDE_PART; */
+ printf (" ** reiserfs_devread() read outside partition\n");
+ return 0;
+ }
+
+ /*
+	 * Fold any whole sectors of byte_offset into the starting sector number.
+ */
+ sector += byte_offset >> SECTOR_BITS;
+ byte_offset &= SECTOR_SIZE - 1;
+
+#if defined(DEBUG)
+ printf (" <%d, %d, %d> ", sector, byte_offset, byte_len);
+#endif
+
+
+ if (reiserfs_block_dev_desc == NULL)
+ return 0;
+
+
+ if (byte_offset != 0) {
+ /* read first part which isn't aligned with start of sector */
+ if (reiserfs_block_dev_desc->block_read(reiserfs_block_dev_desc->dev,
+ part_info->start + sector, 1,
+ (unsigned long *)sec_buf) != 1) {
+ printf (" ** reiserfs_devread() read error\n");
+ return 0;
+ }
+ memcpy(buf, sec_buf+byte_offset, min(SECTOR_SIZE-byte_offset, byte_len));
+ buf+=min(SECTOR_SIZE-byte_offset, byte_len);
+ byte_len-=min(SECTOR_SIZE-byte_offset, byte_len);
+ sector++;
+ }
+
+ /* read sector aligned part */
+ block_len = byte_len & ~(SECTOR_SIZE-1);
+ if (reiserfs_block_dev_desc->block_read(reiserfs_block_dev_desc->dev,
+ part_info->start + sector, block_len/SECTOR_SIZE,
+ (unsigned long *)buf) != block_len/SECTOR_SIZE) {
+ printf (" ** reiserfs_devread() read error - block\n");
+ return 0;
+ }
+ buf+=block_len;
+ byte_len-=block_len;
+ sector+= block_len/SECTOR_SIZE;
+
+ if ( byte_len != 0 ) {
+		/* read the remaining data that does not fill a whole sector */
+ if (reiserfs_block_dev_desc->block_read(reiserfs_block_dev_desc->dev,
+ part_info->start + sector, 1,
+ (unsigned long *)sec_buf) != 1) {
+ printf (" ** reiserfs_devread() read error - last part\n");
+ return 0;
+ }
+ memcpy(buf, sec_buf, byte_len);
+ }
+
+ return 1;
+}
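To make the three read phases in reiserfs_devread() concrete (assuming the usual 512-byte SECTOR_SIZE and SECTOR_BITS of 9 from reiserfs_private.h): a call with sector = 0, byte_offset = 700 and byte_len = 1500 first folds the offset, giving sector = 1 and byte_offset = 188; the unaligned head read then copies min(512 - 188, 1500) = 324 bytes from the tail of that sector, the aligned middle read transfers block_len = 1176 & ~511 = 1024 bytes (two whole sectors), and the final partial read copies the remaining 152 bytes from the start of the next sector.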
diff --git a/qemu/roms/u-boot/fs/reiserfs/mode_string.c b/qemu/roms/u-boot/fs/reiserfs/mode_string.c
new file mode 100644
index 000000000..ec18dc16e
--- /dev/null
+++ b/qemu/roms/u-boot/fs/reiserfs/mode_string.c
@@ -0,0 +1,125 @@
+/*
+ * mode_string implementation for busybox
+ *
+ * Copyright (C) 2003 Manuel Novoa III <mjn3@codepoet.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/* Aug 13, 2003
+ * Fix a bug reported by junkio@cox.net involving the mode_chars index.
+ */
+
+
+#include <common.h>
+#include <linux/stat.h>
+
+#if ( S_ISUID != 04000 ) || ( S_ISGID != 02000 ) || ( S_ISVTX != 01000 ) \
+ || ( S_IRUSR != 00400 ) || ( S_IWUSR != 00200 ) || ( S_IXUSR != 00100 ) \
+ || ( S_IRGRP != 00040 ) || ( S_IWGRP != 00020 ) || ( S_IXGRP != 00010 ) \
+ || ( S_IROTH != 00004 ) || ( S_IWOTH != 00002 ) || ( S_IXOTH != 00001 )
+#error permission bitflag value assumption(s) violated!
+#endif
+
+#if ( S_IFSOCK!= 0140000 ) || ( S_IFLNK != 0120000 ) \
+ || ( S_IFREG != 0100000 ) || ( S_IFBLK != 0060000 ) \
+ || ( S_IFDIR != 0040000 ) || ( S_IFCHR != 0020000 ) \
+ || ( S_IFIFO != 0010000 )
+#warning mode type bitflag value assumption(s) violated! falling back to larger version
+
+#if (S_IRWXU | S_IRWXG | S_IRWXO | S_ISUID | S_ISGID | S_ISVTX) == 07777
+#undef mode_t
+#define mode_t unsigned short
+#endif
+
+static const mode_t mode_flags[] = {
+ S_IRUSR, S_IWUSR, S_IXUSR, S_ISUID,
+ S_IRGRP, S_IWGRP, S_IXGRP, S_ISGID,
+ S_IROTH, S_IWOTH, S_IXOTH, S_ISVTX
+};
+
+/* The static const char arrays below are duplicated for the two cases
+ * because moving them ahead of the mode_flags declaration causes a text
+ * size increase with the gcc version I'm using. */
+
+/* The previous version used "0pcCd?bB-?l?s???". However, the '0', 'C',
+ * and 'B' types don't appear to be available on linux. So I removed them. */
+static const char type_chars[16] = "?pc?d?b?-?l?s???";
+/* 0123456789abcdef */
+static const char mode_chars[7] = "rwxSTst";
+
+const char *bb_mode_string(int mode)
+{
+ static char buf[12];
+ char *p = buf;
+
+ int i, j, k;
+
+ *p = type_chars[ (mode >> 12) & 0xf ];
+ i = 0;
+ do {
+ j = k = 0;
+ do {
+ *++p = '-';
+ if (mode & mode_flags[i+j]) {
+ *p = mode_chars[j];
+ k = j;
+ }
+ } while (++j < 3);
+ if (mode & mode_flags[i+j]) {
+ *p = mode_chars[3 + (k & 2) + ((i&8) >> 3)];
+ }
+ i += 4;
+ } while (i < 12);
+
+ /* Note: We don't bother with nul termination because bss initialization
+ * should have taken care of that for us. If the user scribbled in buf
+ * memory, they deserve whatever happens. But we'll at least assert. */
+ if (buf[10] != 0) return NULL;
+
+ return buf;
+}
+
+#else
+
+/* The previous version used "0pcCd?bB-?l?s???". However, the '0', 'C',
+ * and 'B' types don't appear to be available on linux. So I removed them. */
+static const char type_chars[16] = "?pc?d?b?-?l?s???";
+/* 0123456789abcdef */
+static const char mode_chars[7] = "rwxSTst";
+
+const char *bb_mode_string(int mode)
+{
+ static char buf[12];
+ char *p = buf;
+
+ int i, j, k, m;
+
+ *p = type_chars[ (mode >> 12) & 0xf ];
+ i = 0;
+ m = 0400;
+ do {
+ j = k = 0;
+ do {
+ *++p = '-';
+ if (mode & m) {
+ *p = mode_chars[j];
+ k = j;
+ }
+ m >>= 1;
+ } while (++j < 3);
+ ++i;
+ if (mode & (010000 >> i)) {
+ *p = mode_chars[3 + (k & 2) + (i == 3)];
+ }
+ } while (i < 3);
+
+ /* Note: We don't bother with nul termination because bss initialization
+ * should have taken care of that for us. If the user scribbled in buf
+ * memory, they deserve whatever happens. But we'll at least assert. */
+ if (buf[10] != 0) return NULL;
+
+ return buf;
+}
+
+#endif
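In both variants of bb_mode_string() the type character comes straight from the mode's top nibble: (mode >> 12) & 0xf indexes type_chars, so S_IFDIR (octal 0040000) gives index 4 and 'd', while S_IFREG (octal 0100000) gives index 8 and '-'. A tiny hosted sketch of just that lookup, for illustration only:

    #include <stdio.h>

    /* Same lookup table as mode_string.c above. */
    static const char type_chars[16] = "?pc?d?b?-?l?s???";

    int main(void)
    {
            unsigned int dir = 0040000;     /* S_IFDIR */
            unsigned int reg = 0100000;     /* S_IFREG */

            /* prints "d -" */
            printf("%c %c\n", type_chars[(dir >> 12) & 0xf],
                              type_chars[(reg >> 12) & 0xf]);
            return 0;
    }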
diff --git a/qemu/roms/u-boot/fs/reiserfs/reiserfs.c b/qemu/roms/u-boot/fs/reiserfs/reiserfs.c
new file mode 100644
index 000000000..1d6fa083c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/reiserfs/reiserfs.c
@@ -0,0 +1,972 @@
+/*
+ * Copyright 2000-2002 by Hans Reiser, licensing governed by reiserfs/README
+ *
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ *
+ * (C) Copyright 2003 - 2004
+ * Sysgo AG, <www.elinos.com>, Pavel Bartusek <pba@sysgo.com>
+ *
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/* An implementation for the ReiserFS filesystem ported from GRUB.
+ * Some parts of this code (mainly the structures and defines) are
+ * from the original reiser fs code, as found in the linux kernel.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/ctype.h>
+#include <linux/time.h>
+#include <asm/byteorder.h>
+#include <reiserfs.h>
+
+#include "reiserfs_private.h"
+
+#undef REISERDEBUG
+
+/* Some parts of this code (mainly the structures and defines) are
+ * from the original reiser fs code, as found in the linux kernel.
+ */
+
+static char fsys_buf[FSYS_BUFLEN];
+static reiserfs_error_t errnum = ERR_NONE;
+static int print_possibilities;
+static unsigned int filepos, filemax;
+
+static int
+substring (const char *s1, const char *s2)
+{
+ while (*s1 == *s2)
+ {
+ /* The strings match exactly. */
+ if (! *(s1++))
+ return 0;
+ s2 ++;
+ }
+
+ /* S1 is a substring of S2. */
+ if (*s1 == 0)
+ return -1;
+
+ /* S1 isn't a substring. */
+ return 1;
+}
+
+static void sd_print_item (struct item_head * ih, char * item)
+{
+ char filetime[30];
+ time_t ttime;
+
+ if (stat_data_v1 (ih)) {
+ struct stat_data_v1 * sd = (struct stat_data_v1 *)item;
+ ttime = sd_v1_mtime(sd);
+ ctime_r(&ttime, filetime);
+ printf ("%-10s %4hd %6d %6d %9d %24.24s",
+ bb_mode_string(sd_v1_mode(sd)), sd_v1_nlink(sd),sd_v1_uid(sd), sd_v1_gid(sd),
+ sd_v1_size(sd), filetime);
+ } else {
+ struct stat_data * sd = (struct stat_data *)item;
+ ttime = sd_v2_mtime(sd);
+ ctime_r(&ttime, filetime);
+ printf ("%-10s %4d %6d %6d %9d %24.24s",
+ bb_mode_string(sd_v2_mode(sd)), sd_v2_nlink(sd),sd_v2_uid(sd),sd_v2_gid(sd),
+ (__u32) sd_v2_size(sd), filetime);
+ }
+}
+
+static int
+journal_read (int block, int len, char *buffer)
+{
+ return reiserfs_devread ((INFO->journal_block + block) << INFO->blocksize_shift,
+ 0, len, buffer);
+}
+
+/* Read a block from ReiserFS file system, taking the journal into
+ * account. If the block nr is in the journal, the block from the
+ * journal is taken.
+ */
+static int
+block_read (unsigned int blockNr, int start, int len, char *buffer)
+{
+ int transactions = INFO->journal_transactions;
+ int desc_block = INFO->journal_first_desc;
+ int journal_mask = INFO->journal_block_count - 1;
+ int translatedNr = blockNr;
+ __u32 *journal_table = JOURNAL_START;
+ while (transactions-- > 0)
+ {
+ int i = 0;
+ int j_len;
+ if (__le32_to_cpu(*journal_table) != 0xffffffff)
+ {
+ /* Search for the blockNr in cached journal */
+ j_len = __le32_to_cpu(*journal_table++);
+ while (i++ < j_len)
+ {
+ if (__le32_to_cpu(*journal_table++) == blockNr)
+ {
+ journal_table += j_len - i;
+ goto found;
+ }
+ }
+ }
+ else
+ {
+ /* This is the end of cached journal marker. The remaining
+ * transactions are still on disk.
+ */
+ struct reiserfs_journal_desc desc;
+ struct reiserfs_journal_commit commit;
+
+ if (! journal_read (desc_block, sizeof (desc), (char *) &desc))
+ return 0;
+
+ j_len = __le32_to_cpu(desc.j_len);
+ while (i < j_len && i < JOURNAL_TRANS_HALF)
+ if (__le32_to_cpu(desc.j_realblock[i++]) == blockNr)
+ goto found;
+
+ if (j_len >= JOURNAL_TRANS_HALF)
+ {
+ int commit_block = (desc_block + 1 + j_len) & journal_mask;
+ if (! journal_read (commit_block,
+ sizeof (commit), (char *) &commit))
+ return 0;
+ while (i < j_len)
+ if (__le32_to_cpu(commit.j_realblock[i++ - JOURNAL_TRANS_HALF]) == blockNr)
+ goto found;
+ }
+ }
+ goto not_found;
+
+ found:
+ translatedNr = INFO->journal_block + ((desc_block + i) & journal_mask);
+#ifdef REISERDEBUG
+ printf ("block_read: block %d is mapped to journal block %d.\n",
+ blockNr, translatedNr - INFO->journal_block);
+#endif
+ /* We must continue the search, as this block may be overwritten
+ * in later transactions.
+ */
+ not_found:
+ desc_block = (desc_block + 2 + j_len) & journal_mask;
+ }
+ return reiserfs_devread (translatedNr << INFO->blocksize_shift, start, len, buffer);
+}
+
+/* Init the journal data structure. We try to cache as much as
+ * possible in the JOURNAL_START-JOURNAL_END space, but if it is full
+ * we can still read the rest from the disk on demand.
+ *
+ * The first number of valid transactions and the descriptor block of the
+ * first valid transaction are held in INFO. The transactions are all
+ * adjacent, but we must take care of the journal wrap around.
+ */
+static int
+journal_init (void)
+{
+ unsigned int block_count = INFO->journal_block_count;
+ unsigned int desc_block;
+ unsigned int commit_block;
+ unsigned int next_trans_id;
+ struct reiserfs_journal_header header;
+ struct reiserfs_journal_desc desc;
+ struct reiserfs_journal_commit commit;
+ __u32 *journal_table = JOURNAL_START;
+
+ journal_read (block_count, sizeof (header), (char *) &header);
+ desc_block = __le32_to_cpu(header.j_first_unflushed_offset);
+ if (desc_block >= block_count)
+ return 0;
+
+ INFO->journal_first_desc = desc_block;
+ next_trans_id = __le32_to_cpu(header.j_last_flush_trans_id) + 1;
+
+#ifdef REISERDEBUG
+ printf ("journal_init: last flushed %d\n",
+ __le32_to_cpu(header.j_last_flush_trans_id));
+#endif
+
+ while (1)
+ {
+ journal_read (desc_block, sizeof (desc), (char *) &desc);
+ if (substring (JOURNAL_DESC_MAGIC, desc.j_magic) > 0
+ || __le32_to_cpu(desc.j_trans_id) != next_trans_id
+ || __le32_to_cpu(desc.j_mount_id) != __le32_to_cpu(header.j_mount_id))
+ /* no more valid transactions */
+ break;
+
+ commit_block = (desc_block + __le32_to_cpu(desc.j_len) + 1) & (block_count - 1);
+ journal_read (commit_block, sizeof (commit), (char *) &commit);
+ if (__le32_to_cpu(desc.j_trans_id) != commit.j_trans_id
+ || __le32_to_cpu(desc.j_len) != __le32_to_cpu(commit.j_len))
+ /* no more valid transactions */
+ break;
+
+#ifdef REISERDEBUG
+ printf ("Found valid transaction %d/%d at %d.\n",
+ __le32_to_cpu(desc.j_trans_id), __le32_to_cpu(desc.j_mount_id), desc_block);
+#endif
+
+ next_trans_id++;
+ if (journal_table < JOURNAL_END)
+ {
+ if ((journal_table + 1 + __le32_to_cpu(desc.j_len)) >= JOURNAL_END)
+ {
+ /* The table is almost full; mark the end of the cached
+ * journal. */
+ *journal_table = __cpu_to_le32(0xffffffff);
+ journal_table = JOURNAL_END;
+ }
+ else
+ {
+ unsigned int i;
+ /* Cache the length and the realblock numbers in the table.
+ * The block number of the descriptor can easily be computed
+ * and need not be stored here.
+ */
+
+ /* both are in the little endian format */
+ *journal_table++ = desc.j_len;
+ for (i = 0; i < __le32_to_cpu(desc.j_len) && i < JOURNAL_TRANS_HALF; i++)
+ {
+ /* both are in the little endian format */
+ *journal_table++ = desc.j_realblock[i];
+#ifdef REISERDEBUG
+ printf ("block %d is in journal %d.\n",
+ __le32_to_cpu(desc.j_realblock[i]), desc_block);
+#endif
+ }
+ for ( ; i < __le32_to_cpu(desc.j_len); i++)
+ {
+ /* both are in the little endian format */
+ *journal_table++ = commit.j_realblock[i-JOURNAL_TRANS_HALF];
+#ifdef REISERDEBUG
+ printf ("block %d is in journal %d.\n",
+ __le32_to_cpu(commit.j_realblock[i-JOURNAL_TRANS_HALF]),
+ desc_block);
+#endif
+ }
+ }
+ }
+ desc_block = (commit_block + 1) & (block_count - 1);
+ }
+#ifdef REISERDEBUG
+ printf ("Transaction %d/%d at %d isn't valid.\n",
+ __le32_to_cpu(desc.j_trans_id), __le32_to_cpu(desc.j_mount_id), desc_block);
+#endif
+
+ INFO->journal_transactions
+ = next_trans_id - __le32_to_cpu(header.j_last_flush_trans_id) - 1;
+ return errnum == 0;
+}
+
+/* check filesystem types and read superblock into memory buffer */
+int
+reiserfs_mount (unsigned part_length)
+{
+ struct reiserfs_super_block super;
+ int superblock = REISERFS_DISK_OFFSET_IN_BYTES >> SECTOR_BITS;
+ char *cache;
+
+ if (part_length < superblock + (sizeof (super) >> SECTOR_BITS)
+ || ! reiserfs_devread (superblock, 0, sizeof (struct reiserfs_super_block),
+ (char *) &super)
+ || (substring (REISER3FS_SUPER_MAGIC_STRING, super.s_magic) > 0
+ && substring (REISER2FS_SUPER_MAGIC_STRING, super.s_magic) > 0
+ && substring (REISERFS_SUPER_MAGIC_STRING, super.s_magic) > 0)
+ || (/* check that this is not a copy inside the journal log */
+ sb_journal_block(&super) * sb_blocksize(&super)
+ <= REISERFS_DISK_OFFSET_IN_BYTES))
+ {
+ /* Try old super block position */
+ superblock = REISERFS_OLD_DISK_OFFSET_IN_BYTES >> SECTOR_BITS;
+ if (part_length < superblock + (sizeof (super) >> SECTOR_BITS)
+ || ! reiserfs_devread (superblock, 0, sizeof (struct reiserfs_super_block),
+ (char *) &super))
+ return 0;
+
+ if (substring (REISER2FS_SUPER_MAGIC_STRING, super.s_magic) > 0
+ && substring (REISERFS_SUPER_MAGIC_STRING, super.s_magic) > 0)
+ {
+ /* pre journaling super block ? */
+ if (substring (REISERFS_SUPER_MAGIC_STRING,
+ (char*) ((int) &super + 20)) > 0)
+ return 0;
+
+ set_sb_blocksize(&super, REISERFS_OLD_BLOCKSIZE);
+ set_sb_journal_block(&super, 0);
+ set_sb_version(&super, 0);
+ }
+ }
+
+ /* check the version number. */
+ if (sb_version(&super) > REISERFS_MAX_SUPPORTED_VERSION)
+ return 0;
+
+ INFO->version = sb_version(&super);
+ INFO->blocksize = sb_blocksize(&super);
+ INFO->fullblocksize_shift = log2 (sb_blocksize(&super));
+ INFO->blocksize_shift = INFO->fullblocksize_shift - SECTOR_BITS;
+ INFO->cached_slots =
+ (FSYSREISER_CACHE_SIZE >> INFO->fullblocksize_shift) - 1;
+
+#ifdef REISERDEBUG
+ printf ("reiserfs_mount: version=%d, blocksize=%d\n",
+ INFO->version, INFO->blocksize);
+#endif /* REISERDEBUG */
+
+ /* Clear node cache. */
+ memset (INFO->blocks, 0, sizeof (INFO->blocks));
+
+ if (sb_blocksize(&super) < FSYSREISER_MIN_BLOCKSIZE
+ || sb_blocksize(&super) > FSYSREISER_MAX_BLOCKSIZE
+ || (SECTOR_SIZE << INFO->blocksize_shift) != sb_blocksize(&super))
+ return 0;
+
+ /* Initialize journal code. If something fails we end with zero
+ * journal_transactions, so we don't access the journal at all.
+ */
+ INFO->journal_transactions = 0;
+ if (sb_journal_block(&super) != 0 && super.s_journal_dev == 0)
+ {
+ INFO->journal_block = sb_journal_block(&super);
+ INFO->journal_block_count = sb_journal_size(&super);
+ if (is_power_of_two (INFO->journal_block_count))
+ journal_init ();
+
+ /* Read in super block again, maybe it is in the journal */
+ block_read (superblock >> INFO->blocksize_shift,
+ 0, sizeof (struct reiserfs_super_block), (char *) &super);
+ }
+
+ if (! block_read (sb_root_block(&super), 0, INFO->blocksize, (char*) ROOT))
+ return 0;
+
+ cache = ROOT;
+ INFO->tree_depth = __le16_to_cpu(BLOCKHEAD (cache)->blk_level);
+
+#ifdef REISERDEBUG
+ printf ("root read_in: block=%d, depth=%d\n",
+ sb_root_block(&super), INFO->tree_depth);
+#endif /* REISERDEBUG */
+
+ if (INFO->tree_depth >= MAX_HEIGHT)
+ return 0;
+ if (INFO->tree_depth == DISK_LEAF_NODE_LEVEL)
+ {
+ /* There is only one node in the whole filesystem,
+ * which is simultaneously leaf and root */
+ memcpy (LEAF, ROOT, INFO->blocksize);
+ }
+ return 1;
+}
+
+/***************** TREE ACCESSING METHODS *****************************/
+
+/* I assume you are familiar with the ReiserFS tree; if not, go to
+ * http://www.namesys.com/content_table.html
+ *
+ * My tree node cache is organized as follows:
+ *   0   ROOT node
+ *   1   LEAF node (if the ROOT is also a LEAF it is copied here)
+ *   2-n other nodes on the current path from bottom to top.
+ *       If there is not enough space in the cache, the topmost nodes
+ *       are omitted.
+ *
+ * I have only two methods to find a key in the tree:
+ *   search_stat(dir_id, objectid) searches for the stat entry (always
+ *       the first entry) of an object.
+ *   next_key() gets the next key in tree order.
+ *
+ * This means that only sequential reads of files are efficient, but
+ * this really doesn't hurt for grub.
+ */
+
+/* Read in the node at the current path and depth into the node cache.
+ * You must set INFO->blocks[depth] before calling this.
+ */
+static char *
+read_tree_node (unsigned int blockNr, int depth)
+{
+ char* cache = CACHE(depth);
+ int num_cached = INFO->cached_slots;
+ if (depth < num_cached)
+ {
+ /* This is the cached part of the path. Check if same block is
+ * needed.
+ */
+ if (blockNr == INFO->blocks[depth])
+ return cache;
+ }
+ else
+ cache = CACHE(num_cached);
+
+#ifdef REISERDEBUG
+ printf (" next read_in: block=%d (depth=%d)\n",
+ blockNr, depth);
+#endif /* REISERDEBUG */
+ if (! block_read (blockNr, 0, INFO->blocksize, cache))
+ return 0;
+ /* Make sure it has the right node level */
+ if (__le16_to_cpu(BLOCKHEAD (cache)->blk_level) != depth)
+ {
+ errnum = ERR_FSYS_CORRUPT;
+ return 0;
+ }
+
+ INFO->blocks[depth] = blockNr;
+ return cache;
+}
+
+/* Get the next key, i.e. the key following the last retrieved key in
+ * tree order. INFO->current_ih and INFO->current_item are updated
+ * accordingly. */
+static int
+next_key (void)
+{
+ int depth;
+ struct item_head *ih = INFO->current_ih + 1;
+ char *cache;
+
+#ifdef REISERDEBUG
+ printf ("next_key:\n old ih: key %d:%d:%d:%d version:%d\n",
+ __le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
+ __le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
+ __le32_to_cpu(INFO->current_ih->ih_key.u.v1.k_offset),
+ __le32_to_cpu(INFO->current_ih->ih_key.u.v1.k_uniqueness),
+ __le16_to_cpu(INFO->current_ih->ih_version));
+#endif /* REISERDEBUG */
+
+ if (ih == &ITEMHEAD[__le16_to_cpu(BLOCKHEAD (LEAF)->blk_nr_item)])
+ {
+ depth = DISK_LEAF_NODE_LEVEL;
+ /* The last item was the last one in the leaf node.
+ * Read in the next block.
+ */
+ do
+ {
+ if (depth == INFO->tree_depth)
+ {
+ /* There are no more keys at all.
+ * Return a dummy item with MAX_KEY */
+ ih = (struct item_head *) &BLOCKHEAD (LEAF)->blk_right_delim_key;
+ goto found;
+ }
+ depth++;
+#ifdef REISERDEBUG
+ printf (" depth=%d, i=%d\n", depth, INFO->next_key_nr[depth]);
+#endif /* REISERDEBUG */
+ }
+ while (INFO->next_key_nr[depth] == 0);
+
+ if (depth == INFO->tree_depth)
+ cache = ROOT;
+ else if (depth <= INFO->cached_slots)
+ cache = CACHE (depth);
+ else
+ {
+ cache = read_tree_node (INFO->blocks[depth], depth);
+ if (! cache)
+ return 0;
+ }
+
+ do
+ {
+ int nr_item = __le16_to_cpu(BLOCKHEAD (cache)->blk_nr_item);
+ int key_nr = INFO->next_key_nr[depth]++;
+#ifdef REISERDEBUG
+ printf (" depth=%d, i=%d/%d\n", depth, key_nr, nr_item);
+#endif /* REISERDEBUG */
+ if (key_nr == nr_item)
+ /* This is the last item in this block, set the next_key_nr to 0 */
+ INFO->next_key_nr[depth] = 0;
+
+ cache = read_tree_node (dc_block_number(&(DC (cache)[key_nr])), --depth);
+ if (! cache)
+ return 0;
+ }
+ while (depth > DISK_LEAF_NODE_LEVEL);
+
+ ih = ITEMHEAD;
+ }
+ found:
+ INFO->current_ih = ih;
+ INFO->current_item = &LEAF[__le16_to_cpu(ih->ih_item_location)];
+#ifdef REISERDEBUG
+ printf (" new ih: key %d:%d:%d:%d version:%d\n",
+ __le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
+ __le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
+ __le32_to_cpu(INFO->current_ih->ih_key.u.v1.k_offset),
+ __le32_to_cpu(INFO->current_ih->ih_key.u.v1.k_uniqueness),
+ __le16_to_cpu(INFO->current_ih->ih_version));
+#endif /* REISERDEBUG */
+ return 1;
+}
+
+/* preconditions: reiserfs_mount already executed, therefore
+ * INFO block is valid
+ * returns: 0 if error (errnum is set),
+ * nonzero iff we were able to find the key successfully.
+ * postconditions: on a nonzero return, the current_ih and
+ * current_item fields describe the key that equals the
+ * searched key. INFO->next_key contains the next key after
+ * the searched key.
+ * side effects: messes around with the cache.
+ */
+static int
+search_stat (__u32 dir_id, __u32 objectid)
+{
+ char *cache;
+ int depth;
+ int nr_item;
+ int i;
+ struct item_head *ih;
+#ifdef REISERDEBUG
+ printf ("search_stat:\n key %d:%d:0:0\n", dir_id, objectid);
+#endif /* REISERDEBUG */
+
+ depth = INFO->tree_depth;
+ cache = ROOT;
+
+ while (depth > DISK_LEAF_NODE_LEVEL)
+ {
+ struct key *key;
+ nr_item = __le16_to_cpu(BLOCKHEAD (cache)->blk_nr_item);
+
+ key = KEY (cache);
+
+ for (i = 0; i < nr_item; i++)
+ {
+ if (__le32_to_cpu(key->k_dir_id) > dir_id
+ || (__le32_to_cpu(key->k_dir_id) == dir_id
+ && (__le32_to_cpu(key->k_objectid) > objectid
+ || (__le32_to_cpu(key->k_objectid) == objectid
+ && (__le32_to_cpu(key->u.v1.k_offset)
+ | __le32_to_cpu(key->u.v1.k_uniqueness)) > 0))))
+ break;
+ key++;
+ }
+
+#ifdef REISERDEBUG
+ printf (" depth=%d, i=%d/%d\n", depth, i, nr_item);
+#endif /* REISERDEBUG */
+ INFO->next_key_nr[depth] = (i == nr_item) ? 0 : i+1;
+ cache = read_tree_node (dc_block_number(&(DC (cache)[i])), --depth);
+ if (! cache)
+ return 0;
+ }
+
+ /* cache == LEAF */
+ nr_item = __le16_to_cpu(BLOCKHEAD (LEAF)->blk_nr_item);
+ ih = ITEMHEAD;
+ for (i = 0; i < nr_item; i++)
+ {
+ if (__le32_to_cpu(ih->ih_key.k_dir_id) == dir_id
+ && __le32_to_cpu(ih->ih_key.k_objectid) == objectid
+ && __le32_to_cpu(ih->ih_key.u.v1.k_offset) == 0
+ && __le32_to_cpu(ih->ih_key.u.v1.k_uniqueness) == 0)
+ {
+#ifdef REISERDEBUG
+ printf (" depth=%d, i=%d/%d\n", depth, i, nr_item);
+#endif /* REISERDEBUG */
+ INFO->current_ih = ih;
+ INFO->current_item = &LEAF[__le16_to_cpu(ih->ih_item_location)];
+ return 1;
+ }
+ ih++;
+ }
+ errnum = ERR_FSYS_CORRUPT;
+ return 0;
+}
+
+int
+reiserfs_read (char *buf, unsigned len)
+{
+ unsigned int blocksize;
+ unsigned int offset;
+ unsigned int to_read;
+ char *prev_buf = buf;
+
+#ifdef REISERDEBUG
+ printf ("reiserfs_read: filepos=%d len=%d, offset=%Lx\n",
+ filepos, len, (__u64) IH_KEY_OFFSET (INFO->current_ih) - 1);
+#endif /* REISERDEBUG */
+
+ if (__le32_to_cpu(INFO->current_ih->ih_key.k_objectid) != INFO->fileinfo.k_objectid
+ || IH_KEY_OFFSET (INFO->current_ih) > filepos + 1)
+ {
+ search_stat (INFO->fileinfo.k_dir_id, INFO->fileinfo.k_objectid);
+ goto get_next_key;
+ }
+
+ while (! errnum)
+ {
+ if (__le32_to_cpu(INFO->current_ih->ih_key.k_objectid) != INFO->fileinfo.k_objectid) {
+ break;
+ }
+
+ offset = filepos - IH_KEY_OFFSET (INFO->current_ih) + 1;
+ blocksize = __le16_to_cpu(INFO->current_ih->ih_item_len);
+
+#ifdef REISERDEBUG
+ printf (" loop: filepos=%d len=%d, offset=%d blocksize=%d\n",
+ filepos, len, offset, blocksize);
+#endif /* REISERDEBUG */
+
+ if (IH_KEY_ISTYPE(INFO->current_ih, TYPE_DIRECT)
+ && offset < blocksize)
+ {
+#ifdef REISERDEBUG
+ printf ("direct_read: offset=%d, blocksize=%d\n",
+ offset, blocksize);
+#endif /* REISERDEBUG */
+ to_read = blocksize - offset;
+ if (to_read > len)
+ to_read = len;
+
+ memcpy (buf, INFO->current_item + offset, to_read);
+ goto update_buf_len;
+ }
+ else if (IH_KEY_ISTYPE(INFO->current_ih, TYPE_INDIRECT))
+ {
+ blocksize = (blocksize >> 2) << INFO->fullblocksize_shift;
+#ifdef REISERDEBUG
+ printf ("indirect_read: offset=%d, blocksize=%d\n",
+ offset, blocksize);
+#endif /* REISERDEBUG */
+
+ while (offset < blocksize)
+ {
+ __u32 blocknr = __le32_to_cpu(((__u32 *) INFO->current_item)
+ [offset >> INFO->fullblocksize_shift]);
+ int blk_offset = offset & (INFO->blocksize-1);
+ to_read = INFO->blocksize - blk_offset;
+ if (to_read > len)
+ to_read = len;
+
+ /* The journal is only for metadata. Data blocks can be read
+ * directly without using block_read().
+ */
+ reiserfs_devread (blocknr << INFO->blocksize_shift,
+ blk_offset, to_read, buf);
+ update_buf_len:
+ len -= to_read;
+ buf += to_read;
+ offset += to_read;
+ filepos += to_read;
+ if (len == 0)
+ goto done;
+ }
+ }
+ get_next_key:
+ next_key ();
+ }
+ done:
+ return errnum ? 0 : buf - prev_buf;
+}
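
A worked example for orientation (with illustrative numbers): an indirect item whose ih_item_len is 48 bytes holds 48 / 4 = 12 block pointers, so on a filesystem with 4096-byte blocks it covers (48 >> 2) << 12 = 49152 bytes of file data; the inner loop above picks the pointer at index offset >> fullblocksize_shift and reads at most one block per iteration.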
+
+
+/* preconditions: reiserfs_mount already executed, therefore
+ * INFO block is valid
+ * returns: 0 if error, nonzero iff we were able to find the file successfully
+ * postconditions: on a nonzero return, INFO->fileinfo contains the info
+ * of the file we were trying to look up, filepos is 0 and filemax is
+ * the size of the file.
+ */
+static int
+reiserfs_dir (char *dirname)
+{
+ struct reiserfs_de_head *de_head;
+ char *rest, ch;
+ __u32 dir_id, objectid, parent_dir_id = 0, parent_objectid = 0;
+#ifndef STAGE1_5
+ int do_possibilities = 0;
+#endif /* ! STAGE1_5 */
+ char linkbuf[PATH_MAX]; /* buffer for following symbolic links */
+ int link_count = 0;
+ int mode;
+
+ dir_id = REISERFS_ROOT_PARENT_OBJECTID;
+ objectid = REISERFS_ROOT_OBJECTID;
+
+ while (1)
+ {
+#ifdef REISERDEBUG
+ printf ("dirname=%s\n", dirname);
+#endif /* REISERDEBUG */
+
+ /* Search for the stat info first. */
+ if (! search_stat (dir_id, objectid))
+ return 0;
+
+#ifdef REISERDEBUG
+ printf ("sd_mode=%x sd_size=%d\n",
+ stat_data_v1(INFO->current_ih) ? sd_v1_mode((struct stat_data_v1 *) INFO->current_item) :
+ sd_v2_mode((struct stat_data *) (INFO->current_item)),
+ stat_data_v1(INFO->current_ih) ? sd_v1_size((struct stat_data_v1 *) INFO->current_item) :
+ sd_v2_size((struct stat_data *) INFO->current_item)
+ );
+
+#endif /* REISERDEBUG */
+ mode = stat_data_v1(INFO->current_ih) ?
+ sd_v1_mode((struct stat_data_v1 *) INFO->current_item) :
+ sd_v2_mode((struct stat_data *) INFO->current_item);
+
+ /* If we've got a symbolic link, then chase it. */
+ if (S_ISLNK (mode))
+ {
+ unsigned int len;
+ if (++link_count > MAX_LINK_COUNT)
+ {
+ errnum = ERR_SYMLINK_LOOP;
+ return 0;
+ }
+
+ /* Get the symlink size. */
+ filemax = stat_data_v1(INFO->current_ih) ?
+ sd_v1_size((struct stat_data_v1 *) INFO->current_item) :
+ sd_v2_size((struct stat_data *) INFO->current_item);
+
+ /* Find out how long our remaining name is. */
+ len = 0;
+ while (dirname[len] && !isspace (dirname[len]))
+ len++;
+
+ if (filemax + len > sizeof (linkbuf) - 1)
+ {
+ errnum = ERR_FILELENGTH;
+ return 0;
+ }
+
+ /* Copy the remaining name to the end of the symlink data.
+ Note that DIRNAME and LINKBUF may overlap! */
+ memmove (linkbuf + filemax, dirname, len+1);
+
+ INFO->fileinfo.k_dir_id = dir_id;
+ INFO->fileinfo.k_objectid = objectid;
+ filepos = 0;
+ if (! next_key ()
+ || reiserfs_read (linkbuf, filemax) != filemax)
+ {
+ if (! errnum)
+ errnum = ERR_FSYS_CORRUPT;
+ return 0;
+ }
+
+#ifdef REISERDEBUG
+ printf ("symlink=%s\n", linkbuf);
+#endif /* REISERDEBUG */
+
+ dirname = linkbuf;
+ if (*dirname == '/')
+ {
+ /* It's an absolute link, so look it up in root. */
+ dir_id = REISERFS_ROOT_PARENT_OBJECTID;
+ objectid = REISERFS_ROOT_OBJECTID;
+ }
+ else
+ {
+ /* Relative, so look it up in our parent directory. */
+ dir_id = parent_dir_id;
+ objectid = parent_objectid;
+ }
+
+ /* Now lookup the new name. */
+ continue;
+ }
+
+ /* if we have a real file (and we're not just printing possibilities),
+ then this is where we want to exit */
+
+ if (! *dirname || isspace (*dirname))
+ {
+ if (! S_ISREG (mode))
+ {
+ errnum = ERR_BAD_FILETYPE;
+ return 0;
+ }
+
+ filepos = 0;
+ filemax = stat_data_v1(INFO->current_ih) ?
+ sd_v1_size((struct stat_data_v1 *) INFO->current_item) :
+ sd_v2_size((struct stat_data *) INFO->current_item);
+#if 0
+ /* If this is a new stat data and size is > 4GB set filemax to
+ * maximum
+ */
+ if (__le16_to_cpu(INFO->current_ih->ih_version) == ITEM_VERSION_2
+ && sd_size_hi((struct stat_data *) INFO->current_item) > 0)
+ filemax = 0xffffffff;
+#endif
+ INFO->fileinfo.k_dir_id = dir_id;
+ INFO->fileinfo.k_objectid = objectid;
+ return next_key ();
+ }
+
+ /* continue with the file/directory name interpretation */
+ while (*dirname == '/')
+ dirname++;
+ if (! S_ISDIR (mode))
+ {
+ errnum = ERR_BAD_FILETYPE;
+ return 0;
+ }
+ for (rest = dirname; (ch = *rest) && ! isspace (ch) && ch != '/'; rest++);
+ *rest = 0;
+
+# ifndef STAGE1_5
+ if (print_possibilities && ch != '/')
+ do_possibilities = 1;
+# endif /* ! STAGE1_5 */
+
+ while (1)
+ {
+ char *name_end;
+ int num_entries;
+
+ if (! next_key ())
+ return 0;
+#ifdef REISERDEBUG
+ printf ("ih: key %d:%d:%d:%d version:%d\n",
+ __le32_to_cpu(INFO->current_ih->ih_key.k_dir_id),
+ __le32_to_cpu(INFO->current_ih->ih_key.k_objectid),
+ __le32_to_cpu(INFO->current_ih->ih_key.u.v1.k_offset),
+ __le32_to_cpu(INFO->current_ih->ih_key.u.v1.k_uniqueness),
+ __le16_to_cpu(INFO->current_ih->ih_version));
+#endif /* REISERDEBUG */
+
+ if (__le32_to_cpu(INFO->current_ih->ih_key.k_objectid) != objectid)
+ break;
+
+ name_end = INFO->current_item + __le16_to_cpu(INFO->current_ih->ih_item_len);
+ de_head = (struct reiserfs_de_head *) INFO->current_item;
+ num_entries = __le16_to_cpu(INFO->current_ih->u.ih_entry_count);
+ while (num_entries > 0)
+ {
+ char *filename = INFO->current_item + deh_location(de_head);
+ char tmp = *name_end;
+ if ((deh_state(de_head) & DEH_Visible))
+ {
+ int cmp;
+ /* Directory names in ReiserFS are not null-terminated.
+ * We write a temporary 0 after the name.
+ * NOTE: this may overwrite the first block in
+ * the tree cache. That doesn't hurt as long as we
+ * don't call next_key () in between.
+ */
+ *name_end = 0;
+ cmp = substring (dirname, filename);
+ *name_end = tmp;
+# ifndef STAGE1_5
+ if (do_possibilities)
+ {
+ if (cmp <= 0)
+ {
+ char fn[PATH_MAX];
+ struct fsys_reiser_info info_save;
+
+ if (print_possibilities > 0)
+ print_possibilities = -print_possibilities;
+ *name_end = 0;
+ strcpy(fn, filename);
+ *name_end = tmp;
+
+ /* If NAME is "." or "..", do not count it. */
+ if (strcmp (fn, ".") != 0 && strcmp (fn, "..") != 0) {
+ memcpy(&info_save, INFO, sizeof(struct fsys_reiser_info));
+ search_stat (deh_dir_id(de_head), deh_objectid(de_head));
+ sd_print_item(INFO->current_ih, INFO->current_item);
+ printf(" %s\n", fn);
+ search_stat (dir_id, objectid);
+ memcpy(INFO, &info_save, sizeof(struct fsys_reiser_info));
+ }
+ }
+ }
+ else
+# endif /* ! STAGE1_5 */
+ if (cmp == 0)
+ goto found;
+ }
+ /* The beginning of this name marks the end of the next name.
+ */
+ name_end = filename;
+ de_head++;
+ num_entries--;
+ }
+ }
+
+# ifndef STAGE1_5
+ if (print_possibilities < 0)
+ return 1;
+# endif /* ! STAGE1_5 */
+
+ errnum = ERR_FILE_NOT_FOUND;
+ *rest = ch;
+ return 0;
+
+ found:
+ *rest = ch;
+ dirname = rest;
+
+ parent_dir_id = dir_id;
+ parent_objectid = objectid;
+ dir_id = deh_dir_id(de_head);
+ objectid = deh_objectid(de_head);
+ }
+}
+
+/*
+ * U-Boot interface functions
+ */
+
+/*
+ * List given directory
+ *
+ * RETURN: 0 - OK, else grub_error_t errnum
+ */
+int
+reiserfs_ls (char *dirname)
+{
+ char *dir_slash;
+ int res;
+
+ errnum = 0;
+ dir_slash = malloc(strlen(dirname) + 2); /* room for the appended "/" and the terminating NUL */
+ if (dir_slash == NULL) {
+ return ERR_NUMBER_OVERFLOW;
+ }
+ strcpy(dir_slash, dirname);
+ /* add "/" to the directory name */
+ strcat(dir_slash, "/");
+
+ print_possibilities = 1;
+ res = reiserfs_dir (dir_slash);
+ free(dir_slash);
+ if (!res || errnum) {
+ return errnum;
+ }
+
+ return 0;
+}
+
+/*
+ * Open file for reading
+ *
+ * RETURN: >0 - OK, size of opened file
+ * <0 - ERROR -grub_error_t errnum
+ */
+int
+reiserfs_open (char *filename)
+{
+ /* open the file */
+ errnum = 0;
+ print_possibilities = 0;
+ if (!reiserfs_dir (filename) || errnum) {
+ return -errnum;
+ }
+ return filemax;
+}
diff --git a/qemu/roms/u-boot/fs/reiserfs/reiserfs_private.h b/qemu/roms/u-boot/fs/reiserfs/reiserfs_private.h
new file mode 100644
index 000000000..9d14c7171
--- /dev/null
+++ b/qemu/roms/u-boot/fs/reiserfs/reiserfs_private.h
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2000-2002 by Hans Reiser, licensing governed by reiserfs/README
+ *
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ *
+ * (C) Copyright 2003 - 2004
+ * Sysgo AG, <www.elinos.com>, Pavel Bartusek <pba@sysgo.com>
+ *
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/* An implementation of the ReiserFS filesystem ported from GRUB.
+ * Some parts of this code (mainly the structures and defines) are
+ * from the original reiserfs code, as found in the Linux kernel.
+ */
+
+#ifndef __BYTE_ORDER
+#if defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#elif defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
+#define __BYTE_ORDER __BIG_ENDIAN
+#else
+#error "unable to define __BYTE_ORDER"
+#endif
+#endif /* not __BYTE_ORDER */
+
+#define FSYS_BUFLEN 0x8000
+#define FSYS_BUF fsys_buf
+
+/* This is the new super block of a journaling reiserfs system */
+struct reiserfs_super_block
+{
+ __u32 s_block_count; /* blocks count */
+ __u32 s_free_blocks; /* free blocks count */
+ __u32 s_root_block; /* root block number */
+ __u32 s_journal_block; /* journal block number */
+ __u32 s_journal_dev; /* journal device number */
+ __u32 s_journal_size; /* size of the journal at FS creation; used to make sure transactions don't overflow it */
+ __u32 s_journal_trans_max; /* max number of blocks in a transaction. */
+ __u32 s_journal_magic; /* random value made on fs creation */
+ __u32 s_journal_max_batch; /* max number of blocks to batch into a trans */
+ __u32 s_journal_max_commit_age; /* in seconds, how old can an async commit be */
+ __u32 s_journal_max_trans_age; /* in seconds, how old can a transaction be */
+ __u16 s_blocksize; /* block size */
+ __u16 s_oid_maxsize; /* max size of object id array */
+ __u16 s_oid_cursize; /* current size of object id array */
+ __u16 s_state; /* valid or error */
+ char s_magic[16]; /* reiserfs magic string indicates that file system is reiserfs */
+ __u16 s_tree_height; /* height of disk tree */
+ __u16 s_bmap_nr; /* number of bitmap blocks needed to address each block of the file system */
+ __u16 s_version;
+ char s_unused[128]; /* zero filled by mkreiserfs */
+};
+
+
+#define sb_root_block(sbp) (__le32_to_cpu((sbp)->s_root_block))
+#define sb_journal_block(sbp) (__le32_to_cpu((sbp)->s_journal_block))
+#define set_sb_journal_block(sbp,v) ((sbp)->s_journal_block = __cpu_to_le32(v))
+#define sb_journal_size(sbp) (__le32_to_cpu((sbp)->s_journal_size))
+#define sb_blocksize(sbp) (__le16_to_cpu((sbp)->s_blocksize))
+#define set_sb_blocksize(sbp,v) ((sbp)->s_blocksize = __cpu_to_le16(v))
+#define sb_version(sbp) (__le16_to_cpu((sbp)->s_version))
+#define set_sb_version(sbp,v) ((sbp)->s_version = __cpu_to_le16(v))
+
+
+#define REISERFS_MAX_SUPPORTED_VERSION 2
+#define REISERFS_SUPER_MAGIC_STRING "ReIsErFs"
+#define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs"
+#define REISER3FS_SUPER_MAGIC_STRING "ReIsEr3Fs"
+
+#define MAX_HEIGHT 7
+
+/* must be correct to keep the desc and commit structs at 4k */
+#define JOURNAL_TRANS_HALF 1018
+
+/* first block written in a commit. */
+struct reiserfs_journal_desc {
+ __u32 j_trans_id; /* id of commit */
+ __u32 j_len; /* length of commit. len +1 is the commit block */
+ __u32 j_mount_id; /* mount id of this trans*/
+ __u32 j_realblock[JOURNAL_TRANS_HALF]; /* real locations for the first blocks */
+ char j_magic[12];
+};
+
+/* last block written in a commit */
+struct reiserfs_journal_commit {
+ __u32 j_trans_id; /* must match j_trans_id from the desc block */
+ __u32 j_len; /* ditto */
+ __u32 j_realblock[JOURNAL_TRANS_HALF]; /* real locations for the last blocks */
+ char j_digest[16]; /* md5 sum of all the blocks involved, including desc and commit. not used, kill it */
+};
+
+/* this header block gets written whenever a transaction is considered
+ fully flushed, and is more recent than the last fully flushed
+ transaction.
+ fully flushed means all the log blocks and all the real blocks are
+ on disk, and this transaction does not need to be replayed.
+*/
+struct reiserfs_journal_header {
+ /* id of last fully flushed transaction */
+ __u32 j_last_flush_trans_id;
+ /* offset in the log of where to start replay after a crash */
+ __u32 j_first_unflushed_offset;
+ /* mount id to detect very old transactions */
+ __u32 j_mount_id;
+};
+
+/* magic string to find desc blocks in the journal */
+#define JOURNAL_DESC_MAGIC "ReIsErLB"
+
+
+/*
+ * directories use this key as well as old files
+ */
+struct offset_v1
+{
+ /*
+ * for regular files this is the offset to the first byte of the
+ * body, contained in the object-item, as measured from the start of
+ * the entire body of the object.
+ *
+ * for directory entries, k_offset consists of a hash derived from
+ * hashing the name and using a few bits (23 or more) of the resulting
+ * hash, and a generation number that allows distinguishing names with
+ * hash collisions. If the number of collisions overflows the generation
+ * number, we return EEXIST. The high-order bit is always 0.
+ */
+ __u32 k_offset;
+ __u32 k_uniqueness;
+};
+
+struct offset_v2 {
+ /*
+ * for regular files this is the offset to the first byte of the
+ * body, contained in the object-item, as measured from the start of
+ * the entire body of the object.
+ *
+ * for directory entries, k_offset consists of a hash derived from
+ * hashing the name and using a few bits (23 or more) of the resulting
+ * hash, and a generation number that allows distinguishing names with
+ * hash collisions. If the number of collisions overflows the generation
+ * number, we return EEXIST. The high-order bit is always 0.
+ */
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ /* little endian version */
+ __u64 k_offset:60;
+ __u64 k_type: 4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ /* big endian version */
+ __u64 k_type: 4;
+ __u64 k_offset:60;
+#else
+#error "__LITTLE_ENDIAN_BITFIELD or __BIG_ENDIAN_BITFIELD must be defined"
+#endif
+} __attribute__ ((__packed__));
+
+#define TYPE_MAXTYPE 3
+#define TYPE_ANY 15
+
+#if (__BYTE_ORDER == __BIG_ENDIAN)
+typedef union {
+ struct offset_v2 offset_v2;
+ __u64 linear;
+} __attribute__ ((__packed__)) offset_v2_esafe_overlay;
+
+static inline __u16 offset_v2_k_type( const struct offset_v2 *v2 )
+{
+ offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
+ tmp.linear = __le64_to_cpu( tmp.linear );
+ return (tmp.offset_v2.k_type <= TYPE_MAXTYPE)?tmp.offset_v2.k_type:TYPE_ANY;
+}
+
+static inline loff_t offset_v2_k_offset( const struct offset_v2 *v2 )
+{
+ offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2;
+ tmp.linear = __le64_to_cpu( tmp.linear );
+ return tmp.offset_v2.k_offset;
+}
+#elif (__BYTE_ORDER == __LITTLE_ENDIAN)
+# define offset_v2_k_type(v2) ((v2)->k_type)
+# define offset_v2_k_offset(v2) ((v2)->k_offset)
+#else
+#error "__BYTE_ORDER must be __LITTLE_ENDIAN or __BIG_ENDIAN"
+#endif
+
+struct key
+{
+ /* packing locality: by default parent directory object id */
+ __u32 k_dir_id;
+ /* object identifier */
+ __u32 k_objectid;
+ /* the offset and node type (old and new form) */
+ union
+ {
+ struct offset_v1 v1;
+ struct offset_v2 v2;
+ }
+ u;
+};
+
+#define KEY_SIZE (sizeof (struct key))
+
+/* Header of a disk block. More precisely, header of a formatted leaf
+ or internal node, and not the header of an unformatted node. */
+struct block_head
+{
+ __u16 blk_level; /* Level of a block in the tree. */
+ __u16 blk_nr_item; /* Number of keys/items in a block. */
+ __u16 blk_free_space; /* Block free space in bytes. */
+ struct key blk_right_delim_key; /* Right delimiting key for this block (supported for leaf level nodes
+ only) */
+};
+#define BLKH_SIZE (sizeof (struct block_head))
+#define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */
+
+struct item_head
+{
+ /* Everything in the tree is found by searching for it based on
+ * its key.*/
+ struct key ih_key;
+ union {
+ /* The free space in the last unformatted node of an
+ indirect item if this is an indirect item. This
+ equals 0xFFFF iff this is a direct item or stat data
+ item. Note that the key, not this field, is used to
+ determine the item type, and thus which field this
+ union contains. */
+ __u16 ih_free_space;
+ /* Iff this is a directory item, this field equals the
+ number of directory entries in the directory item. */
+ __u16 ih_entry_count;
+ } __attribute__ ((__packed__)) u;
+ __u16 ih_item_len; /* total size of the item body */
+ __u16 ih_item_location; /* an offset to the item body
+ * within the block */
+ __u16 ih_version; /* 0 for all old items, 2 for new
+ ones. The highest bit is set by fsck
+ temporarily and cleared when it is
+ done */
+} __attribute__ ((__packed__));
+
+/* size of item header */
+#define IH_SIZE (sizeof (struct item_head))
+
+#define ITEM_VERSION_1 0
+#define ITEM_VERSION_2 1
+
+#define ih_version(ih) (__le16_to_cpu((ih)->ih_version))
+
+#define IH_KEY_OFFSET(ih) (ih_version(ih) == ITEM_VERSION_1 \
+ ? __le32_to_cpu((ih)->ih_key.u.v1.k_offset) \
+ : offset_v2_k_offset(&((ih)->ih_key.u.v2)))
+
+#define IH_KEY_ISTYPE(ih, type) (ih_version(ih) == ITEM_VERSION_1 \
+ ? __le32_to_cpu((ih)->ih_key.u.v1.k_uniqueness) == V1_##type \
+ : offset_v2_k_type(&((ih)->ih_key.u.v2)) == V2_##type)
+
+/***************************************************************************/
+/* DISK CHILD */
+/***************************************************************************/
+/* Disk child pointer: The pointer from an internal node of the tree
+ to a node that is on disk. */
+struct disk_child {
+ __u32 dc_block_number; /* Disk child's block number. */
+ __u16 dc_size; /* Disk child's used space. */
+ __u16 dc_reserved;
+};
+
+#define DC_SIZE (sizeof(struct disk_child))
+#define dc_block_number(dc_p) (__le32_to_cpu((dc_p)->dc_block_number))
+
+
+/*
+ * Old stat data is 32 bytes long. We distinguish the new one by its
+ * different size.
+ */
+struct stat_data_v1
+{
+ __u16 sd_mode; /* file type, permissions */
+ __u16 sd_nlink; /* number of hard links */
+ __u16 sd_uid; /* owner */
+ __u16 sd_gid; /* group */
+ __u32 sd_size; /* file size */
+ __u32 sd_atime; /* time of last access */
+ __u32 sd_mtime; /* time file was last modified */
+ __u32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+ union {
+ __u32 sd_rdev;
+ __u32 sd_blocks; /* number of blocks file uses */
+ } __attribute__ ((__packed__)) u;
+ __u32 sd_first_direct_byte; /* first byte of file which is stored
+ in a direct item: except that if it
+ equals 1 it is a symlink and if it
+ equals ~(__u32)0 there is no
+ direct item. The existence of this
+ field really grates on me. Let's
+ replace it with a macro based on
+ sd_size and our tail suppression
+ policy. Someday. -Hans */
+} __attribute__ ((__packed__));
+
+#define stat_data_v1(ih) (ih_version(ih) == ITEM_VERSION_1)
+#define sd_v1_mode(sdp) ((sdp)->sd_mode)
+#define sd_v1_nlink(sdp) (__le16_to_cpu((sdp)->sd_nlink))
+#define sd_v1_uid(sdp) (__le16_to_cpu((sdp)->sd_uid))
+#define sd_v1_gid(sdp) (__le16_to_cpu((sdp)->sd_gid))
+#define sd_v1_size(sdp) (__le32_to_cpu((sdp)->sd_size))
+#define sd_v1_mtime(sdp) (__le32_to_cpu((sdp)->sd_mtime))
+
+/* Stat Data on disk (reiserfs version of UFS disk inode minus the
+ address blocks) */
+struct stat_data {
+ __u16 sd_mode; /* file type, permissions */
+ __u16 sd_attrs; /* persistent inode flags */
+ __u32 sd_nlink; /* number of hard links */
+ __u64 sd_size; /* file size */
+ __u32 sd_uid; /* owner */
+ __u32 sd_gid; /* group */
+ __u32 sd_atime; /* time of last access */
+ __u32 sd_mtime; /* time file was last modified */
+ __u32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */
+ __u32 sd_blocks;
+ union {
+ __u32 sd_rdev;
+ __u32 sd_generation;
+ /*__u32 sd_first_direct_byte; */
+ /* first byte of file which is stored in a
+ direct item: except that if it equals 1
+ it is a symlink and if it equals
+ ~(__u32)0 there is no direct item. The
+ existence of this field really grates
+ on me. Let's replace it with a macro
+ based on sd_size and our tail
+ suppression policy? */
+ } __attribute__ ((__packed__)) u;
+} __attribute__ ((__packed__));
+
+#define stat_data_v2(ih) (ih_version(ih) == ITEM_VERSION_2)
+#define sd_v2_mode(sdp) (__le16_to_cpu((sdp)->sd_mode))
+#define sd_v2_nlink(sdp) (__le32_to_cpu((sdp)->sd_nlink))
+#define sd_v2_size(sdp) (__le64_to_cpu((sdp)->sd_size))
+#define sd_v2_uid(sdp) (__le32_to_cpu((sdp)->sd_uid))
+#define sd_v2_gid(sdp) (__le32_to_cpu((sdp)->sd_gid))
+#define sd_v2_mtime(sdp) (__le32_to_cpu((sdp)->sd_mtime))
+
+#define sd_mode(sdp) (__le16_to_cpu((sdp)->sd_mode))
+#define sd_size(sdp) (__le32_to_cpu((sdp)->sd_size))
+#define sd_size_hi(sdp) (__le32_to_cpu((sdp)->sd_size_hi))
+
+struct reiserfs_de_head
+{
+ __u32 deh_offset; /* third component of the directory entry key */
+ __u32 deh_dir_id; /* objectid of the parent directory of the
+ object, that is referenced by directory entry */
+ __u32 deh_objectid;/* objectid of the object, that is referenced by
+ directory entry */
+ __u16 deh_location;/* offset of name in the whole item */
+ __u16 deh_state; /* whether 1) entry contains stat data (for
+ future), and 2) whether entry is hidden
+ (unlinked) */
+};
+
+#define DEH_SIZE (sizeof (struct reiserfs_de_head))
+#define deh_offset(p_deh) (__le32_to_cpu((p_deh)->deh_offset))
+#define deh_dir_id(p_deh) (__le32_to_cpu((p_deh)->deh_dir_id))
+#define deh_objectid(p_deh) (__le32_to_cpu((p_deh)->deh_objectid))
+#define deh_location(p_deh) (__le16_to_cpu((p_deh)->deh_location))
+#define deh_state(p_deh) (__le16_to_cpu((p_deh)->deh_state))
+
+
+#define DEH_Statdata (1 << 0) /* not used now */
+#define DEH_Visible (1 << 2)
+
+#define SD_OFFSET 0
+#define SD_UNIQUENESS 0
+#define DOT_OFFSET 1
+#define DOT_DOT_OFFSET 2
+#define DIRENTRY_UNIQUENESS 500
+
+#define V1_TYPE_STAT_DATA 0x0
+#define V1_TYPE_DIRECT 0xffffffff
+#define V1_TYPE_INDIRECT 0xfffffffe
+#define V1_TYPE_DIRECTORY_MAX 0xfffffffd
+#define V2_TYPE_STAT_DATA 0
+#define V2_TYPE_INDIRECT 1
+#define V2_TYPE_DIRECT 2
+#define V2_TYPE_DIRENTRY 3
+
+#define REISERFS_ROOT_OBJECTID 2
+#define REISERFS_ROOT_PARENT_OBJECTID 1
+#define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024)
+/* the spot for the super in versions 3.5 - 3.5.11 (inclusive) */
+#define REISERFS_OLD_DISK_OFFSET_IN_BYTES (8 * 1024)
+#define REISERFS_OLD_BLOCKSIZE 4096
+
+#define S_ISREG(mode) (((mode) & 0170000) == 0100000)
+#define S_ISDIR(mode) (((mode) & 0170000) == 0040000)
+#define S_ISLNK(mode) (((mode) & 0170000) == 0120000)
+
+#define PATH_MAX 1024 /* include/linux/limits.h */
+#define MAX_LINK_COUNT 5 /* number of symbolic links to follow */
+
+/* The size of the node cache */
+#define FSYSREISER_CACHE_SIZE 24*1024
+#define FSYSREISER_MIN_BLOCKSIZE SECTOR_SIZE
+#define FSYSREISER_MAX_BLOCKSIZE FSYSREISER_CACHE_SIZE / 3
+
+/* Info about currently opened file */
+struct fsys_reiser_fileinfo
+{
+ __u32 k_dir_id;
+ __u32 k_objectid;
+};
+
+/* In memory info about the currently mounted filesystem */
+struct fsys_reiser_info
+{
+ /* The last read item head */
+ struct item_head *current_ih;
+ /* The last read item */
+ char *current_item;
+ /* The information for the currently opened file */
+ struct fsys_reiser_fileinfo fileinfo;
+ /* The start of the journal */
+ __u32 journal_block;
+ /* The size of the journal */
+ __u32 journal_block_count;
+ /* The first valid descriptor block in journal
+ (relative to journal_block) */
+ __u32 journal_first_desc;
+
+ /* The ReiserFS version. */
+ __u16 version;
+ /* The current depth of the reiser tree. */
+ __u16 tree_depth;
+ /* SECTOR_SIZE << blocksize_shift == blocksize. */
+ __u8 blocksize_shift;
+ /* 1 << full_blocksize_shift == blocksize. */
+ __u8 fullblocksize_shift;
+ /* The reiserfs block size (must be a power of 2) */
+ __u16 blocksize;
+ /* The number of cached tree nodes */
+ __u16 cached_slots;
+ /* The number of valid transactions in journal */
+ __u16 journal_transactions;
+
+ unsigned int blocks[MAX_HEIGHT];
+ unsigned int next_key_nr[MAX_HEIGHT];
+};
+
+/* The cached s+tree blocks in FSYS_BUF, see below
+ * for a more detailed description.
+ */
+#define ROOT ((char *) ((int) FSYS_BUF))
+#define CACHE(i) (ROOT + ((i) << INFO->fullblocksize_shift))
+#define LEAF CACHE (DISK_LEAF_NODE_LEVEL)
+
+#define BLOCKHEAD(cache) ((struct block_head *) cache)
+#define ITEMHEAD ((struct item_head *) ((int) LEAF + BLKH_SIZE))
+#define KEY(cache) ((struct key *) ((int) cache + BLKH_SIZE))
+#define DC(cache) ((struct disk_child *) \
+ ((int) cache + BLKH_SIZE + KEY_SIZE * nr_item))
+/* The fsys_reiser_info block.
+ */
+#define INFO \
+ ((struct fsys_reiser_info *) ((int) FSYS_BUF + FSYSREISER_CACHE_SIZE))
+/*
+ * The journal cache. For each transaction it contains the number of
+ * blocks followed by the real block numbers of this transaction.
+ *
+ * If the block numbers of some transaction won't fit in this space,
+ * this list is stopped with a 0xffffffff marker and the remaining
+ * uncommitted transactions aren't cached.
+ */
+#define JOURNAL_START ((__u32 *) (INFO + 1))
+#define JOURNAL_END ((__u32 *) (FSYS_BUF + FSYS_BUFLEN))
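
To illustrate the layout described above, a hypothetical journal cache with two cached transactions might look like this (the block numbers are made up, not taken from a real image):

JOURNAL_START[0] = 3           length of the first cached transaction
JOURNAL_START[1] = 8211        real (home) locations of its three blocks
JOURNAL_START[2] = 8212
JOURNAL_START[3] = 16403
JOURNAL_START[4] = 1           length of the second cached transaction
JOURNAL_START[5] = 8211        the same block, rewritten in a later transaction
JOURNAL_START[6] = 0xffffffff  end-of-cache marker; remaining transactions stay on disk

block_read() walks this list for every metadata read and keeps scanning after a hit, because a later transaction may contain a newer copy of the same block; the entries are kept in on-disk little-endian order, exactly as copied from the descriptor and commit blocks.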
+
+
+static __inline__ unsigned long
+log2 (unsigned long word)
+{
+#ifdef __I386__
+ __asm__ ("bsfl %1,%0"
+ : "=r" (word)
+ : "r" (word));
+ return word;
+#else
+ int i;
+
+ for(i=0; i<(8*sizeof(word)); i++)
+ if ((1<<i) & word)
+ return i;
+
+ return 0;
+#endif
+}
+
+static __inline__ int
+is_power_of_two (unsigned long word)
+{
+ return (word & -word) == word;
+}
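
As a quick check of how reiserfs_mount() uses these helpers (assuming the usual 512-byte SECTOR_SIZE, i.e. SECTOR_BITS == 9): for a 4096-byte filesystem block, log2(4096) yields fullblocksize_shift = 12, so blocksize_shift = 12 - 9 = 3 and 512 << 3 == 4096 passes the sanity check; is_power_of_two() then gates journal_init(), since the journal wrap-around in block_read() is computed with the mask journal_block_count - 1, which only works for power-of-two journal sizes.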
+
+extern const char *bb_mode_string(int mode);
+extern int reiserfs_devread (int sector, int byte_offset, int byte_len, char *buf);
diff --git a/qemu/roms/u-boot/fs/sandbox/Makefile b/qemu/roms/u-boot/fs/sandbox/Makefile
new file mode 100644
index 000000000..ca238f6d7
--- /dev/null
+++ b/qemu/roms/u-boot/fs/sandbox/Makefile
@@ -0,0 +1,13 @@
+#
+# Copyright (c) 2012, Google Inc.
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# (C) Copyright 2003
+# Pavel Bartusek, Sysgo Real-Time Solutions AG, pba@sysgo.de
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := sandboxfs.o
diff --git a/qemu/roms/u-boot/fs/sandbox/sandboxfs.c b/qemu/roms/u-boot/fs/sandbox/sandboxfs.c
new file mode 100644
index 000000000..85079788c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/sandbox/sandboxfs.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2012, Google Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <fs.h>
+#include <os.h>
+
+int sandbox_fs_set_blk_dev(block_dev_desc_t *rbdd, disk_partition_t *info)
+{
+ return 0;
+}
+
+long sandbox_fs_read_at(const char *filename, unsigned long pos,
+ void *buffer, unsigned long maxsize)
+{
+ ssize_t size;
+ int fd, ret;
+
+ fd = os_open(filename, OS_O_RDONLY);
+ if (fd < 0)
+ return fd;
+ ret = os_lseek(fd, pos, OS_SEEK_SET);
+ if (ret == -1) {
+ os_close(fd);
+ return ret;
+ }
+ if (!maxsize)
+ maxsize = os_get_filesize(filename);
+ size = os_read(fd, buffer, maxsize);
+ os_close(fd);
+
+ return size;
+}
+
+long sandbox_fs_write_at(const char *filename, unsigned long pos,
+ void *buffer, unsigned long towrite)
+{
+ ssize_t size;
+ int fd, ret;
+
+ fd = os_open(filename, OS_O_RDWR | OS_O_CREAT);
+ if (fd < 0)
+ return fd;
+ ret = os_lseek(fd, pos, OS_SEEK_SET);
+ if (ret == -1) {
+ os_close(fd);
+ return ret;
+ }
+ size = os_write(fd, buffer, towrite);
+ os_close(fd);
+
+ return size;
+}
+
+int sandbox_fs_ls(const char *dirname)
+{
+ struct os_dirent_node *head, *node;
+ int ret;
+
+ ret = os_dirent_ls(dirname, &head);
+ if (ret)
+ return ret;
+
+ for (node = head; node; node = node->next) {
+ printf("%s %10lu %s\n", os_dirent_get_typename(node->type),
+ node->size, node->name);
+ }
+
+ return 0;
+}
+
+int sandbox_fs_exists(const char *filename)
+{
+ ssize_t sz;
+
+ sz = os_get_filesize(filename);
+ return sz >= 0;
+}
+
+void sandbox_fs_close(void)
+{
+}
+
+int fs_read_sandbox(const char *filename, void *buf, int offset, int len)
+{
+ int len_read;
+
+ len_read = sandbox_fs_read_at(filename, offset, buf, len);
+ if (len_read == -1) {
+ printf("** Unable to read file %s **\n", filename);
+ return -1;
+ }
+
+ return len_read;
+}
+
+int fs_write_sandbox(const char *filename, void *buf, int offset, int len)
+{
+ int len_written;
+
+ len_written = sandbox_fs_write_at(filename, offset, buf, len);
+ if (len_written == -1) {
+ printf("** Unable to write file %s **\n", filename);
+ return -1;
+ }
+
+ return len_written;
+}
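
A brief usage sketch of the wrappers above, assuming a sandbox U-Boot build where the os_*() calls are backed by the host OS; the path and helper name are hypothetical:

/* Hypothetical caller (sketch only): read a host-side file into a buffer. */
static int read_host_config(void *buf, int maxlen)
{
	/* Offset 0, at most maxlen bytes; returns -1 and prints a message on error. */
	return fs_read_sandbox("/tmp/uboot-test.cfg", buf, 0, maxlen);
}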
diff --git a/qemu/roms/u-boot/fs/ubifs/Makefile b/qemu/roms/u-boot/fs/ubifs/Makefile
new file mode 100644
index 000000000..8c8c6ac68
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/Makefile
@@ -0,0 +1,15 @@
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+#
+# (C) Copyright 2003
+# Pavel Bartusek, Sysgo Real-Time Solutions AG, pba@sysgo.de
+#
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := ubifs.o io.o super.o sb.o master.o lpt.o
+obj-y += lpt_commit.o scan.o lprops.o
+obj-y += tnc.o tnc_misc.o debug.o crc16.o budget.o
+obj-y += log.o orphan.o recovery.o replay.o
diff --git a/qemu/roms/u-boot/fs/ubifs/budget.c b/qemu/roms/u-boot/fs/ubifs/budget.c
new file mode 100644
index 000000000..85377ea2a
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/budget.c
@@ -0,0 +1,113 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements the budgeting sub-system which is responsible for UBIFS
+ * space management.
+ *
+ * Factors such as compression, wasted space at the ends of LEBs, space in other
+ * journal heads, the effect of updates on the index, and so on, make it
+ * impossible to accurately predict the amount of space needed. Consequently
+ * approximations are used.
+ */
+
+#include "ubifs.h"
+#include <linux/math64.h>
+
+/**
+ * ubifs_calc_min_idx_lebs - calculate the number of eraseblocks for the index.
+ * @c: UBIFS file-system description object
+ *
+ * This function calculates and returns the number of eraseblocks which should
+ * be kept for index usage.
+ */
+int ubifs_calc_min_idx_lebs(struct ubifs_info *c)
+{
+ int idx_lebs, eff_leb_size = c->leb_size - c->max_idx_node_sz;
+ long long idx_size;
+
+ idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx;
+
+ /* And make sure we have thrice the index size of space reserved */
+ idx_size = idx_size + (idx_size << 1);
+
+ /*
+ * We do not maintain 'old_idx_size' as 'old_idx_lebs'/'old_idx_bytes'
+ * pair, nor similarly the two variables for the new index size, so we
+ * have to do this costly 64-bit division on fast-path.
+ */
+ idx_size += eff_leb_size - 1;
+ idx_lebs = div_u64(idx_size, eff_leb_size);
+ /*
+ * The index head is not available for the in-the-gaps method, so add an
+ * extra LEB to compensate.
+ */
+ idx_lebs += 1;
+ if (idx_lebs < MIN_INDEX_LEBS)
+ idx_lebs = MIN_INDEX_LEBS;
+ return idx_lebs;
+}
+
+/**
+ * ubifs_reported_space - calculate reported free space.
+ * @c: the UBIFS file-system description object
+ * @free: amount of free space
+ *
+ * This function calculates the amount of free space which will be reported
+ * to user-space. User-space applications tend to expect that if the file-system
+ * (e.g., via the 'statfs()' call) reports that it has N bytes available, they
+ * are able to write a file of size N. UBIFS attaches node headers to each data
+ * node and it has to write indexing nodes as well. This introduces additional
+ * overhead, and UBIFS has to report slightly less free space to meet the above
+ * expectations.
+ *
+ * This function assumes free space is made up of uncompressed data nodes and
+ * full index nodes (one per data node, tripled because we always allow enough
+ * space to write the index thrice).
+ *
+ * Note, the calculation is pessimistic, which means that most of the time
+ * UBIFS reports less space than it actually has.
+ */
+long long ubifs_reported_space(const struct ubifs_info *c, long long free)
+{
+ int divisor, factor, f;
+
+ /*
+ * Reported space size is @free * X, where X is the UBIFS block size
+ * divided by (UBIFS block size + all the overhead one data block
+ * introduces). The overhead is the node header + indexing overhead.
+ *
+ * Indexing overhead calculations are based on the following formula:
+ * I = N/(f - 1) + 1, where I - number of indexing nodes, N - number
+ * of data nodes, f - fanout. Because the effective UBIFS fanout is
+ * half of the maximum fanout, we assume that each data node
+ * introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes.
+ * Note, the multiplier 3 is because UBIFS reserves three times as much space
+ * for the index.
+ */
+ f = c->fanout > 3 ? c->fanout >> 1 : 2;
+ factor = UBIFS_BLOCK_SIZE;
+ divisor = UBIFS_MAX_DATA_NODE_SZ;
+ divisor += (c->max_idx_node_sz * 3) / (f - 1);
+ free *= factor;
+ return div_u64(free, divisor);
+}
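
As a rough sanity check of the ratio computed above, here is a standalone sketch with assumed typical values (4096-byte UBIFS blocks, a ~4144-byte maximum data node, a ~400-byte maximum index node and fanout 8); none of these numbers are read from a real superblock.

#include <stdio.h>

int main(void)
{
	long long free_space = 100LL * 1024 * 1024;	/* raw free bytes       */
	int fanout = 8, f = fanout >> 1;		/* effective fanout     */
	int factor = 4096;				/* UBIFS_BLOCK_SIZE     */
	int divisor = 4144 + (400 * 3) / (f - 1);	/* data node + 3x index */

	printf("reported %lld of %lld bytes (%.1f%%)\n",
	       free_space * factor / divisor, free_space,
	       100.0 * factor / divisor);
	return 0;
}

With these assumptions the reported space comes out to roughly 90% of the raw free space, which matches the "pessimistic" note in the comment above.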
diff --git a/qemu/roms/u-boot/fs/ubifs/crc16.c b/qemu/roms/u-boot/fs/ubifs/crc16.c
new file mode 100644
index 000000000..443ccf855
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/crc16.c
@@ -0,0 +1,60 @@
+/*
+ * crc16.c
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include "crc16.h"
+
+/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
+u16 const crc16_table[256] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+};
+
+/**
+ * crc16 - compute the CRC-16 for the data buffer
+ * @crc: previous CRC value
+ * @buffer: data pointer
+ * @len: number of bytes in the buffer
+ *
+ * Returns the updated CRC value.
+ */
+u16 crc16(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = crc16_byte(crc, *buffer++);
+ return crc;
+}
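
A minimal usage sketch for the routine above: the CRC starts at 0 and is folded byte by byte through the table. It assumes the declarations in crc16.h are available in the build; 0xbb3d is the well-known check value for this polynomial with a zero initial value.

#include <stdio.h>
#include "crc16.h"

int main(void)
{
	static const u8 msg[] = "123456789";
	u16 crc = crc16(0, msg, sizeof(msg) - 1);	/* initial CRC is 0 */

	printf("crc16(\"123456789\") = 0x%04x\n", crc);	/* expect 0xbb3d */
	return 0;
}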
diff --git a/qemu/roms/u-boot/fs/ubifs/crc16.h b/qemu/roms/u-boot/fs/ubifs/crc16.h
new file mode 100644
index 000000000..052fd3311
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/crc16.h
@@ -0,0 +1,29 @@
+/*
+ * crc16.h - CRC-16 routine
+ *
+ * Implements the standard CRC-16:
+ * Width 16
+ * Poly 0x8005 (x^16 + x^15 + x^2 + 1)
+ * Init 0
+ *
+ * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifndef __CRC16_H
+#define __CRC16_H
+
+#include <linux/types.h>
+
+extern u16 const crc16_table[256];
+
+extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
+
+static inline u16 crc16_byte(u16 crc, const u8 data)
+{
+ return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
+}
+
+#endif /* __CRC16_H */
diff --git a/qemu/roms/u-boot/fs/ubifs/debug.c b/qemu/roms/u-boot/fs/ubifs/debug.c
new file mode 100644
index 000000000..6afb8835a
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/debug.c
@@ -0,0 +1,156 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This file implements most of the debugging stuff which is compiled in only
+ * when it is enabled. But some debugging check functions are implemented in
+ * the corresponding subsystems, just because they are closely related and utilize
+ * various local functions of those subsystems.
+ */
+
+#define UBIFS_DBG_PRESERVE_UBI
+
+#include "ubifs.h"
+
+#ifdef CONFIG_UBIFS_FS_DEBUG
+
+DEFINE_SPINLOCK(dbg_lock);
+
+static char dbg_key_buf0[128];
+static char dbg_key_buf1[128];
+
+unsigned int ubifs_msg_flags = UBIFS_MSG_FLAGS_DEFAULT;
+unsigned int ubifs_chk_flags = UBIFS_CHK_FLAGS_DEFAULT;
+unsigned int ubifs_tst_flags;
+
+module_param_named(debug_msgs, ubifs_msg_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
+module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
+
+MODULE_PARM_DESC(debug_msgs, "Debug message type flags");
+MODULE_PARM_DESC(debug_chks, "Debug check flags");
+MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
+
+static const char *get_key_type(int type)
+{
+ switch (type) {
+ case UBIFS_INO_KEY:
+ return "inode";
+ case UBIFS_DENT_KEY:
+ return "direntry";
+ case UBIFS_XENT_KEY:
+ return "xentry";
+ case UBIFS_DATA_KEY:
+ return "data";
+ case UBIFS_TRUN_KEY:
+ return "truncate";
+ default:
+ return "unknown/invalid key";
+ }
+}
+
+static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
+ char *buffer)
+{
+ char *p = buffer;
+ int type = key_type(c, key);
+
+ if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
+ switch (type) {
+ case UBIFS_INO_KEY:
+ sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
+ get_key_type(type));
+ break;
+ case UBIFS_DENT_KEY:
+ case UBIFS_XENT_KEY:
+ sprintf(p, "(%lu, %s, %#08x)",
+ (unsigned long)key_inum(c, key),
+ get_key_type(type), key_hash(c, key));
+ break;
+ case UBIFS_DATA_KEY:
+ sprintf(p, "(%lu, %s, %u)",
+ (unsigned long)key_inum(c, key),
+ get_key_type(type), key_block(c, key));
+ break;
+ case UBIFS_TRUN_KEY:
+ sprintf(p, "(%lu, %s)",
+ (unsigned long)key_inum(c, key),
+ get_key_type(type));
+ break;
+ default:
+ sprintf(p, "(bad key type: %#08x, %#08x)",
+ key->u32[0], key->u32[1]);
+ }
+ } else
+ sprintf(p, "bad key format %d", c->key_fmt);
+}
+
+const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
+{
+ /* dbg_lock must be held */
+ sprintf_key(c, key, dbg_key_buf0);
+ return dbg_key_buf0;
+}
+
+const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
+{
+ /* dbg_lock must be held */
+ sprintf_key(c, key, dbg_key_buf1);
+ return dbg_key_buf1;
+}
+
+/**
+ * ubifs_debugging_init - initialize UBIFS debugging.
+ * @c: UBIFS file-system description object
+ *
+ * This function initializes debugging-related data for the file system.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubifs_debugging_init(struct ubifs_info *c)
+{
+ c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
+ if (!c->dbg)
+ return -ENOMEM;
+
+ c->dbg->buf = vmalloc(c->leb_size);
+ if (!c->dbg->buf)
+ goto out;
+
+ return 0;
+
+out:
+ kfree(c->dbg);
+ return -ENOMEM;
+}
+
+/**
+ * ubifs_debugging_exit - free debugging data.
+ * @c: UBIFS file-system description object
+ */
+void ubifs_debugging_exit(struct ubifs_info *c)
+{
+ vfree(c->dbg->buf);
+ kfree(c->dbg);
+}
+
+#endif /* CONFIG_UBIFS_FS_DEBUG */
diff --git a/qemu/roms/u-boot/fs/ubifs/debug.h b/qemu/roms/u-boot/fs/ubifs/debug.h
new file mode 100644
index 000000000..62617b692
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/debug.h
@@ -0,0 +1,392 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+#ifndef __UBIFS_DEBUG_H__
+#define __UBIFS_DEBUG_H__
+
+#ifdef CONFIG_UBIFS_FS_DEBUG
+
+/**
+ * struct ubifs_debug_info - per-FS debugging information.
+ * @buf: a buffer of LEB size, used for various purposes
+ * @old_zroot: old index root - used by 'dbg_check_old_index()'
+ * @old_zroot_level: old index root level - used by 'dbg_check_old_index()'
+ * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()'
+ * @failure_mode: failure mode for recovery testing
+ * @fail_delay: 0=>don't delay, 1=>delay for a time, 2=>delay for a number of calls
+ * @fail_timeout: time in jiffies when delay of failure mode expires
+ * @fail_cnt: current number of calls to failure mode I/O functions
+ * @fail_cnt_max: number of calls by which to delay failure mode
+ * @chk_lpt_sz: used by LPT tree size checker
+ * @chk_lpt_sz2: used by LPT tree size checker
+ * @chk_lpt_wastage: used by LPT tree size checker
+ * @chk_lpt_lebs: used by LPT tree size checker
+ * @new_nhead_offs: used by LPT tree size checker
+ * @new_ihead_lnum: used by debugging to check @c->ihead_lnum
+ * @new_ihead_offs: used by debugging to check @c->ihead_offs
+ *
+ * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()')
+ * @saved_free: saved free space (used by 'dbg_save_space_info()')
+ *
+ * @dfs_dir_name: name of debugfs directory containing this file-system's files
+ * @dfs_dir: direntry object of the file-system debugfs directory
+ * @dfs_dump_lprops: "dump lprops" debugfs knob
+ * @dfs_dump_budg: "dump budgeting information" debugfs knob
+ * @dfs_dump_tnc: "dump TNC" debugfs knob
+ */
+struct ubifs_debug_info {
+ void *buf;
+ struct ubifs_zbranch old_zroot;
+ int old_zroot_level;
+ unsigned long long old_zroot_sqnum;
+ int failure_mode;
+ int fail_delay;
+ unsigned long fail_timeout;
+ unsigned int fail_cnt;
+ unsigned int fail_cnt_max;
+ long long chk_lpt_sz;
+ long long chk_lpt_sz2;
+ long long chk_lpt_wastage;
+ int chk_lpt_lebs;
+ int new_nhead_offs;
+ int new_ihead_lnum;
+ int new_ihead_offs;
+
+ struct ubifs_lp_stats saved_lst;
+ long long saved_free;
+
+ char dfs_dir_name[100];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_dump_lprops;
+ struct dentry *dfs_dump_budg;
+ struct dentry *dfs_dump_tnc;
+};
+
+#define UBIFS_DBG(op) op
+
+#define ubifs_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, 0); \
+ dbg_dump_stack(); \
+ } \
+} while (0)
+
+#define ubifs_assert_cmt_locked(c) do { \
+ if (unlikely(down_write_trylock(&(c)->commit_sem))) { \
+ up_write(&(c)->commit_sem); \
+ printk(KERN_CRIT "commit lock is not locked!\n"); \
+ ubifs_assert(0); \
+ } \
+} while (0)
+
+#define dbg_dump_stack() do { \
+ if (!dbg_failure_mode) \
+ dump_stack(); \
+} while (0)
+
+/* Generic debugging messages */
+#define dbg_msg(fmt, ...) do { \
+ spin_lock(&dbg_lock); \
+ printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", 0, \
+ __func__, ##__VA_ARGS__); \
+ spin_unlock(&dbg_lock); \
+} while (0)
+
+#define dbg_do_msg(typ, fmt, ...) do { \
+ if (ubifs_msg_flags & typ) \
+ dbg_msg(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dbg_err(fmt, ...) do { \
+ spin_lock(&dbg_lock); \
+ ubifs_err(fmt, ##__VA_ARGS__); \
+ spin_unlock(&dbg_lock); \
+} while (0)
+
+const char *dbg_key_str0(const struct ubifs_info *c,
+ const union ubifs_key *key);
+const char *dbg_key_str1(const struct ubifs_info *c,
+ const union ubifs_key *key);
+
+/*
+ * DBGKEY macros require @dbg_lock to be held, which it is in the dbg message
+ * macros.
+ */
+#define DBGKEY(key) dbg_key_str0(c, (key))
+#define DBGKEY1(key) dbg_key_str1(c, (key))
+
+/* General messages */
+#define dbg_gen(fmt, ...) dbg_do_msg(UBIFS_MSG_GEN, fmt, ##__VA_ARGS__)
+
+/* Additional journal messages */
+#define dbg_jnl(fmt, ...) dbg_do_msg(UBIFS_MSG_JNL, fmt, ##__VA_ARGS__)
+
+/* Additional TNC messages */
+#define dbg_tnc(fmt, ...) dbg_do_msg(UBIFS_MSG_TNC, fmt, ##__VA_ARGS__)
+
+/* Additional lprops messages */
+#define dbg_lp(fmt, ...) dbg_do_msg(UBIFS_MSG_LP, fmt, ##__VA_ARGS__)
+
+/* Additional LEB find messages */
+#define dbg_find(fmt, ...) dbg_do_msg(UBIFS_MSG_FIND, fmt, ##__VA_ARGS__)
+
+/* Additional mount messages */
+#define dbg_mnt(fmt, ...) dbg_do_msg(UBIFS_MSG_MNT, fmt, ##__VA_ARGS__)
+
+/* Additional I/O messages */
+#define dbg_io(fmt, ...) dbg_do_msg(UBIFS_MSG_IO, fmt, ##__VA_ARGS__)
+
+/* Additional commit messages */
+#define dbg_cmt(fmt, ...) dbg_do_msg(UBIFS_MSG_CMT, fmt, ##__VA_ARGS__)
+
+/* Additional budgeting messages */
+#define dbg_budg(fmt, ...) dbg_do_msg(UBIFS_MSG_BUDG, fmt, ##__VA_ARGS__)
+
+/* Additional log messages */
+#define dbg_log(fmt, ...) dbg_do_msg(UBIFS_MSG_LOG, fmt, ##__VA_ARGS__)
+
+/* Additional gc messages */
+#define dbg_gc(fmt, ...) dbg_do_msg(UBIFS_MSG_GC, fmt, ##__VA_ARGS__)
+
+/* Additional scan messages */
+#define dbg_scan(fmt, ...) dbg_do_msg(UBIFS_MSG_SCAN, fmt, ##__VA_ARGS__)
+
+/* Additional recovery messages */
+#define dbg_rcvry(fmt, ...) dbg_do_msg(UBIFS_MSG_RCVRY, fmt, ##__VA_ARGS__)
+
+/*
+ * Debugging message type flags (must match msg_type_names in debug.c).
+ *
+ * UBIFS_MSG_GEN: general messages
+ * UBIFS_MSG_JNL: journal messages
+ * UBIFS_MSG_MNT: mount messages
+ * UBIFS_MSG_CMT: commit messages
+ * UBIFS_MSG_FIND: LEB find messages
+ * UBIFS_MSG_BUDG: budgeting messages
+ * UBIFS_MSG_GC: garbage collection messages
+ * UBIFS_MSG_TNC: TNC messages
+ * UBIFS_MSG_LP: lprops messages
+ * UBIFS_MSG_IO: I/O messages
+ * UBIFS_MSG_LOG: log messages
+ * UBIFS_MSG_SCAN: scan messages
+ * UBIFS_MSG_RCVRY: recovery messages
+ */
+enum {
+ UBIFS_MSG_GEN = 0x1,
+ UBIFS_MSG_JNL = 0x2,
+ UBIFS_MSG_MNT = 0x4,
+ UBIFS_MSG_CMT = 0x8,
+ UBIFS_MSG_FIND = 0x10,
+ UBIFS_MSG_BUDG = 0x20,
+ UBIFS_MSG_GC = 0x40,
+ UBIFS_MSG_TNC = 0x80,
+ UBIFS_MSG_LP = 0x100,
+ UBIFS_MSG_IO = 0x200,
+ UBIFS_MSG_LOG = 0x400,
+ UBIFS_MSG_SCAN = 0x800,
+ UBIFS_MSG_RCVRY = 0x1000,
+};
+
+/* Debugging message type flags for each default debug message level */
+#define UBIFS_MSG_LVL_0 0
+#define UBIFS_MSG_LVL_1 0x1
+#define UBIFS_MSG_LVL_2 0x7f
+#define UBIFS_MSG_LVL_3 0xffff
+
+/*
+ * Debugging check flags (must match chk_names in debug.c).
+ *
+ * UBIFS_CHK_GEN: general checks
+ * UBIFS_CHK_TNC: check TNC
+ * UBIFS_CHK_IDX_SZ: check index size
+ * UBIFS_CHK_ORPH: check orphans
+ * UBIFS_CHK_OLD_IDX: check the old index
+ * UBIFS_CHK_LPROPS: check lprops
+ * UBIFS_CHK_FS: check the file-system
+ */
+enum {
+ UBIFS_CHK_GEN = 0x1,
+ UBIFS_CHK_TNC = 0x2,
+ UBIFS_CHK_IDX_SZ = 0x4,
+ UBIFS_CHK_ORPH = 0x8,
+ UBIFS_CHK_OLD_IDX = 0x10,
+ UBIFS_CHK_LPROPS = 0x20,
+ UBIFS_CHK_FS = 0x40,
+};
+
+/*
+ * Special testing flags (must match tst_names in debug.c).
+ *
+ * UBIFS_TST_FORCE_IN_THE_GAPS: force the use of in-the-gaps method
+ * UBIFS_TST_RCVRY: failure mode for recovery testing
+ */
+enum {
+ UBIFS_TST_FORCE_IN_THE_GAPS = 0x2,
+ UBIFS_TST_RCVRY = 0x4,
+};
+
+#if CONFIG_UBIFS_FS_DEBUG_MSG_LVL == 1
+#define UBIFS_MSG_FLAGS_DEFAULT UBIFS_MSG_LVL_1
+#elif CONFIG_UBIFS_FS_DEBUG_MSG_LVL == 2
+#define UBIFS_MSG_FLAGS_DEFAULT UBIFS_MSG_LVL_2
+#elif CONFIG_UBIFS_FS_DEBUG_MSG_LVL == 3
+#define UBIFS_MSG_FLAGS_DEFAULT UBIFS_MSG_LVL_3
+#else
+#define UBIFS_MSG_FLAGS_DEFAULT UBIFS_MSG_LVL_0
+#endif
+
+#ifdef CONFIG_UBIFS_FS_DEBUG_CHKS
+#define UBIFS_CHK_FLAGS_DEFAULT 0xffffffff
+#else
+#define UBIFS_CHK_FLAGS_DEFAULT 0
+#endif
+
+#define dbg_ntype(type) ""
+#define dbg_cstate(cmt_state) ""
+#define dbg_get_key_dump(c, key) ({})
+#define dbg_dump_inode(c, inode) ({})
+#define dbg_dump_node(c, node) ({})
+#define dbg_dump_budget_req(req) ({})
+#define dbg_dump_lstats(lst) ({})
+#define dbg_dump_budg(c) ({})
+#define dbg_dump_lprop(c, lp) ({})
+#define dbg_dump_lprops(c) ({})
+#define dbg_dump_lpt_info(c) ({})
+#define dbg_dump_leb(c, lnum) ({})
+#define dbg_dump_znode(c, znode) ({})
+#define dbg_dump_heap(c, heap, cat) ({})
+#define dbg_dump_pnode(c, pnode, parent, iip) ({})
+#define dbg_dump_tnc(c) ({})
+#define dbg_dump_index(c) ({})
+
+#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
+#define dbg_old_index_check_init(c, zroot) 0
+#define dbg_check_old_index(c, zroot) 0
+#define dbg_check_cats(c) 0
+#define dbg_check_ltab(c) 0
+#define dbg_chk_lpt_free_spc(c) 0
+#define dbg_chk_lpt_sz(c, action, len) 0
+#define dbg_check_synced_i_size(inode) 0
+#define dbg_check_dir_size(c, dir) 0
+#define dbg_check_tnc(c, x) 0
+#define dbg_check_idx_size(c, idx_size) 0
+#define dbg_check_filesystem(c) 0
+#define dbg_check_heap(c, heap, cat, add_pos) ({})
+#define dbg_check_lprops(c) 0
+#define dbg_check_lpt_nodes(c, cnode, row, col) 0
+#define dbg_force_in_the_gaps_enabled 0
+#define dbg_force_in_the_gaps() 0
+#define dbg_failure_mode 0
+#define dbg_failure_mode_registration(c) ({})
+#define dbg_failure_mode_deregistration(c) ({})
+
+int ubifs_debugging_init(struct ubifs_info *c);
+void ubifs_debugging_exit(struct ubifs_info *c);
+
+#else /* !CONFIG_UBIFS_FS_DEBUG */
+
+#define UBIFS_DBG(op)
+
+/* Use "if (0)" to make the compiler check arguments even if debugging is off */
+#define ubifs_assert(expr) do { \
+ if (0 && (expr)) \
+ printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, 0); \
+} while (0)
+
+#define dbg_err(fmt, ...) do { \
+ if (0) \
+ ubifs_err(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dbg_msg(fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG "UBIFS DBG (pid %d): %s: " fmt "\n", \
+ 0, __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define dbg_dump_stack()
+#define ubifs_assert_cmt_locked(c)
+
+#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_jnl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_tnc(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_lp(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_find(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_mnt(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_cmt(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_budg(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_log(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_gc(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_scan(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+#define dbg_rcvry(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
+
+#define DBGKEY(key) ((char *)(key))
+#define DBGKEY1(key) ((char *)(key))
+
+#define ubifs_debugging_init(c) 0
+#define ubifs_debugging_exit(c) ({})
+
+#define dbg_ntype(type) ""
+#define dbg_cstate(cmt_state) ""
+#define dbg_get_key_dump(c, key) ({})
+#define dbg_dump_inode(c, inode) ({})
+#define dbg_dump_node(c, node) ({})
+#define dbg_dump_budget_req(req) ({})
+#define dbg_dump_lstats(lst) ({})
+#define dbg_dump_budg(c) ({})
+#define dbg_dump_lprop(c, lp) ({})
+#define dbg_dump_lprops(c) ({})
+#define dbg_dump_lpt_info(c) ({})
+#define dbg_dump_leb(c, lnum) ({})
+#define dbg_dump_znode(c, znode) ({})
+#define dbg_dump_heap(c, heap, cat) ({})
+#define dbg_dump_pnode(c, pnode, parent, iip) ({})
+#define dbg_dump_tnc(c) ({})
+#define dbg_dump_index(c) ({})
+
+#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
+#define dbg_old_index_check_init(c, zroot) 0
+#define dbg_check_old_index(c, zroot) 0
+#define dbg_check_cats(c) 0
+#define dbg_check_ltab(c) 0
+#define dbg_chk_lpt_free_spc(c) 0
+#define dbg_chk_lpt_sz(c, action, len) 0
+#define dbg_check_synced_i_size(inode) 0
+#define dbg_check_dir_size(c, dir) 0
+#define dbg_check_tnc(c, x) 0
+#define dbg_check_idx_size(c, idx_size) 0
+#define dbg_check_filesystem(c) 0
+#define dbg_check_heap(c, heap, cat, add_pos) ({})
+#define dbg_check_lprops(c) 0
+#define dbg_check_lpt_nodes(c, cnode, row, col) 0
+#define dbg_force_in_the_gaps_enabled 0
+#define dbg_force_in_the_gaps() 0
+#define dbg_failure_mode 0
+#define dbg_failure_mode_registration(c) ({})
+#define dbg_failure_mode_deregistration(c) ({})
+
+#endif /* !CONFIG_UBIFS_FS_DEBUG */
+
+#endif /* !__UBIFS_DEBUG_H__ */
diff --git a/qemu/roms/u-boot/fs/ubifs/io.c b/qemu/roms/u-boot/fs/ubifs/io.c
new file mode 100644
index 000000000..aae5c65ea
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/io.c
@@ -0,0 +1,316 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ * Copyright (C) 2006, 2007 University of Szeged, Hungary
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ * Zoltan Sogor
+ */
+
+/*
+ * This file implements the UBIFS I/O subsystem, which provides various
+ * I/O-related helper functions (reading/writing/checking/validating nodes) and
+ * implements write-buffering support. Write-buffers help to save space which
+ * would otherwise be wasted on padding to the nearest minimal I/O unit
+ * boundary. Instead, data first goes to the write-buffer and is flushed when
+ * the buffer is full or when it has not been used for some time (by a timer).
+ * This is similar to the mechanism used by JFFS2.
+ *
+ * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
+ * mutexes defined inside these objects. Since sometimes upper-level code
+ * has to lock the write-buffer (e.g. journal space reservation code), many
+ * functions related to write-buffers have "nolock" suffix which means that the
+ * caller has to lock the write-buffer before calling this function.
+ *
+ * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not
+ * aligned, UBIFS starts the next node from the aligned address, and the padded
+ * bytes may contain any rubbish. In other words, UBIFS does not put padding
+ * bytes in those small gaps. Common headers of nodes store real node lengths,
+ * not aligned lengths. Indexing nodes also store real lengths in branches.
+ *
+ * When UBIFS pads to the next min. I/O unit boundary, it uses padding nodes,
+ * or padding bytes if a padding node does not fit.
+ *
+ * All UBIFS nodes are protected by CRC checksums and UBIFS checks all nodes
+ * every time they are read from the flash media.
+ */
+
+#include "ubifs.h"
+
+/**
+ * ubifs_ro_mode - switch UBIFS to read-only mode.
+ * @c: UBIFS file-system description object
+ * @err: error code which is the reason of switching to R/O mode
+ */
+void ubifs_ro_mode(struct ubifs_info *c, int err)
+{
+ if (!c->ro_media) {
+ c->ro_media = 1;
+ c->no_chk_data_crc = 0;
+ ubifs_warn("switched to read-only mode, error %d", err);
+ dbg_dump_stack();
+ }
+}
+
+/**
+ * ubifs_check_node - check node.
+ * @c: UBIFS file-system description object
+ * @buf: node to check
+ * @lnum: logical eraseblock number
+ * @offs: offset within the logical eraseblock
+ * @quiet: print no messages
+ * @must_chk_crc: indicates whether to always check the CRC
+ *
+ * This function checks the node magic number and the CRC checksum. It also
+ * validates the node length to prevent UBIFS from misbehaving when an attacker
+ * feeds it a file-system image with corrupted nodes. For example, a too-large
+ * node length in the common header could cause UBIFS to read memory outside of
+ * the allocated buffer when checking the CRC checksum.
+ *
+ * This function may skip checking the CRC of data nodes if @c->no_chk_data_crc
+ * is true, which is controlled by the corresponding UBIFS mount option. However, if
+ * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
+ * checked. Similarly, if @c->always_chk_crc is true, @c->no_chk_data_crc is
+ * ignored and CRC is checked.
+ *
+ * This function returns zero in case of success and %-EUCLEAN in case of bad
+ * CRC or magic.
+ */
+int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ int offs, int quiet, int must_chk_crc)
+{
+ int err = -EINVAL, type, node_len;
+ uint32_t crc, node_crc, magic;
+ const struct ubifs_ch *ch = buf;
+
+ ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(!(offs & 7) && offs < c->leb_size);
+
+ magic = le32_to_cpu(ch->magic);
+ if (magic != UBIFS_NODE_MAGIC) {
+ if (!quiet)
+ ubifs_err("bad magic %#08x, expected %#08x",
+ magic, UBIFS_NODE_MAGIC);
+ err = -EUCLEAN;
+ goto out;
+ }
+
+ type = ch->node_type;
+ if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
+ if (!quiet)
+ ubifs_err("bad node type %d", type);
+ goto out;
+ }
+
+ node_len = le32_to_cpu(ch->len);
+ if (node_len + offs > c->leb_size)
+ goto out_len;
+
+ if (c->ranges[type].max_len == 0) {
+ if (node_len != c->ranges[type].len)
+ goto out_len;
+ } else if (node_len < c->ranges[type].min_len ||
+ node_len > c->ranges[type].max_len)
+ goto out_len;
+
+ if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc &&
+ c->no_chk_data_crc)
+ return 0;
+
+ crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
+ node_crc = le32_to_cpu(ch->crc);
+ if (crc != node_crc) {
+ if (!quiet)
+ ubifs_err("bad CRC: calculated %#08x, read %#08x",
+ crc, node_crc);
+ err = -EUCLEAN;
+ goto out;
+ }
+
+ return 0;
+
+out_len:
+ if (!quiet)
+ ubifs_err("bad node length %d", node_len);
+out:
+ if (!quiet) {
+ ubifs_err("bad node at LEB %d:%d", lnum, offs);
+ dbg_dump_node(c, buf);
+ dbg_dump_stack();
+ }
+ return err;
+}
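+
+/*
+ * Example (illustrative sketch): the CRC computed above covers everything in
+ * the node except its first 8 bytes, i.e. the 'magic' and 'crc' fields of the
+ * common header. A caller validating a freshly read buffer might do:
+ *
+ *   err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
+ *   if (err == -EUCLEAN)
+ *           ubifs_err("corrupt node at LEB %d:%d", lnum, offs);
+ *
+ * where 'buf', 'lnum' and 'offs' are hypothetical caller variables.
+ */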
+
+/**
+ * ubifs_pad - pad flash space.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to put padding to
+ * @pad: how many bytes to pad
+ *
+ * The flash media obliges us to write only in chunks of %c->min_io_size, so
+ * when we have to write less data we add a padding node to the write-buffer
+ * and pad it to the next minimal I/O unit's boundary. Padding nodes help when
+ * the media is being scanned. If the amount of wasted space is not enough to
+ * fit a padding node, which takes %UBIFS_PAD_NODE_SZ bytes, we write the
+ * padding-byte pattern (%UBIFS_PADDING_BYTE) instead.
+ *
+ * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is
+ * used.
+ */
+void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
+{
+ uint32_t crc;
+
+ ubifs_assert(pad >= 0 && !(pad & 7));
+
+ if (pad >= UBIFS_PAD_NODE_SZ) {
+ struct ubifs_ch *ch = buf;
+ struct ubifs_pad_node *pad_node = buf;
+
+ ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
+ ch->node_type = UBIFS_PAD_NODE;
+ ch->group_type = UBIFS_NO_NODE_GROUP;
+ ch->padding[0] = ch->padding[1] = 0;
+ ch->sqnum = 0;
+ ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
+ pad -= UBIFS_PAD_NODE_SZ;
+ pad_node->pad_len = cpu_to_le32(pad);
+ crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
+ ch->crc = cpu_to_le32(crc);
+ memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
+ } else if (pad > 0)
+ /* Too little space, padding node won't fit */
+ memset(buf, UBIFS_PADDING_BYTE, pad);
+}
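+
+/*
+ * Worked example (illustrative, with a hypothetical min_io_size of 2048): if
+ * the write-buffer holds a 1000-byte node, the remaining 1048 bytes of the
+ * min. I/O unit can be padded with ubifs_pad(c, buf + 1000, 1048). Since 1048
+ * is larger than %UBIFS_PAD_NODE_SZ, a padding node is written whose
+ * 'pad_len' field is set to 1048 - %UBIFS_PAD_NODE_SZ, followed by zeroes.
+ */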
+
+/**
+ * next_sqnum - get next sequence number.
+ * @c: UBIFS file-system description object
+ */
+static unsigned long long next_sqnum(struct ubifs_info *c)
+{
+ unsigned long long sqnum;
+
+ spin_lock(&c->cnt_lock);
+ sqnum = ++c->max_sqnum;
+ spin_unlock(&c->cnt_lock);
+
+ if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
+ if (sqnum >= SQNUM_WATERMARK) {
+ ubifs_err("sequence number overflow %llu, end of life",
+ sqnum);
+ ubifs_ro_mode(c, -EINVAL);
+ }
+ ubifs_warn("running out of sequence numbers, end of life soon");
+ }
+
+ return sqnum;
+}
+
+/**
+ * ubifs_prepare_node - prepare node to be written to flash.
+ * @c: UBIFS file-system description object
+ * @node: the node to pad
+ * @len: node length
+ * @pad: if the buffer has to be padded
+ *
+ * This function prepares the node at @node to be written to the media - it
+ * calculates node CRC, fills the common header, and adds proper padding up to
+ * the next minimum I/O unit if @pad is not zero.
+ */
+void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
+{
+ uint32_t crc;
+ struct ubifs_ch *ch = node;
+ unsigned long long sqnum = next_sqnum(c);
+
+ ubifs_assert(len >= UBIFS_CH_SZ);
+
+ ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
+ ch->len = cpu_to_le32(len);
+ ch->group_type = UBIFS_NO_NODE_GROUP;
+ ch->sqnum = cpu_to_le64(sqnum);
+ ch->padding[0] = ch->padding[1] = 0;
+ crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
+ ch->crc = cpu_to_le32(crc);
+
+ if (pad) {
+ len = ALIGN(len, 8);
+ pad = ALIGN(len, c->min_io_size) - len;
+ ubifs_pad(c, node + len, pad);
+ }
+}
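+
+/*
+ * Usage sketch (illustrative): a caller typically fills in the node-specific
+ * fields of 'node', calls
+ *
+ *   ubifs_prepare_node(c, node, len, 1);
+ *
+ * and then writes ALIGN(len, c->min_io_size) bytes, so that the common header,
+ * CRC and trailing padding computed here reach the flash together with the
+ * node (assuming, as is normal, that c->min_io_size is a multiple of 8).
+ */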
+
+/**
+ * ubifs_read_node - read node.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to read to
+ * @type: node type
+ * @len: node length (not aligned)
+ * @lnum: logical eraseblock number
+ * @offs: offset within the logical eraseblock
+ *
+ * This function reads a node of known type and length, checks it, and stores
+ * it in @buf. Returns zero in case of success, %-EUCLEAN if the CRC mismatched,
+ * and a negative error code in case of failure.
+ */
+int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
+ int lnum, int offs)
+{
+ int err, l;
+ struct ubifs_ch *ch = buf;
+
+ dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
+ ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
+ ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
+ ubifs_assert(!(offs & 7) && offs < c->leb_size);
+ ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
+
+ err = ubi_read(c->ubi, lnum, buf, offs, len);
+ if (err && err != -EBADMSG) {
+ ubifs_err("cannot read node %d from LEB %d:%d, error %d",
+ type, lnum, offs, err);
+ return err;
+ }
+
+ if (type != ch->node_type) {
+ ubifs_err("bad node type (%d but expected %d)",
+ ch->node_type, type);
+ goto out;
+ }
+
+ err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
+ if (err) {
+ ubifs_err("expected node type %d", type);
+ return err;
+ }
+
+ l = le32_to_cpu(ch->len);
+ if (l != len) {
+ ubifs_err("bad node length %d, expected %d", l, len);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ ubifs_err("bad node at LEB %d:%d", lnum, offs);
+ dbg_dump_node(c, buf);
+ dbg_dump_stack();
+ return -EINVAL;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/key.h b/qemu/roms/u-boot/fs/ubifs/key.h
new file mode 100644
index 000000000..efb3430a2
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/key.h
@@ -0,0 +1,557 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This header contains various key-related definitions and helper functions.
+ * UBIFS allows several key schemes, so we access key fields only via these
+ * helpers. At the moment only one key scheme is supported.
+ *
+ * Simple key scheme
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Keys are 64 bits long. The first 32 bits are the inode number (the parent
+ * inode number in case of a direntry key). The next 3 bits are the key type.
+ * The last 29 bits are the 4KiB block number in case of a data node, and the
+ * direntry hash in case of a direntry node. We use the "r5" hash borrowed from
+ * reiserfs.
+ */
+
+#ifndef __UBIFS_KEY_H__
+#define __UBIFS_KEY_H__
+
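+/*
+ * Illustration of the simple key format described above (a sketch derived
+ * from the helpers below):
+ *
+ *   u32[0]: inode number (parent inode number for direntry/xentry keys)
+ *   u32[1]: [ key type : 3 bits ][ hash or block number : 29 bits ]
+ *
+ * For example, a data key for inode #5, block #3 is built by data_key_init()
+ * as u32[0] = 5 and u32[1] = 3 | (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS).
+ */
+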
+/**
+ * key_mask_hash - mask a hash value so the reserved values are not used.
+ * @hash: hash value to be masked
+ *
+ * We use hash values as offsets in directories, so the values %0 and %1 are
+ * reserved for "." and "..", and %2 is reserved for the "end of readdir"
+ * marker. This function makes sure the reserved values are not used.
+ */
+static inline uint32_t key_mask_hash(uint32_t hash)
+{
+ hash &= UBIFS_S_KEY_HASH_MASK;
+ if (unlikely(hash <= 2))
+ hash += 3;
+ return hash;
+}
+
+/**
+ * key_r5_hash - R5 hash function (borrowed from reiserfs).
+ * @s: direntry name
+ * @len: name length
+ */
+static inline uint32_t key_r5_hash(const char *s, int len)
+{
+ uint32_t a = 0;
+ const signed char *str = (const signed char *)s;
+
+ while (*str) {
+ a += *str << 4;
+ a += *str >> 4;
+ a *= 11;
+ str++;
+ }
+
+ return key_mask_hash(a);
+}
+
+/**
+ * key_test_hash - testing hash function.
+ * @str: direntry name
+ * @len: name length
+ */
+static inline uint32_t key_test_hash(const char *str, int len)
+{
+ uint32_t a = 0;
+
+ len = min_t(uint32_t, len, 4);
+ memcpy(&a, str, len);
+ return key_mask_hash(a);
+}
+
+/**
+ * ino_key_init - initialize inode key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: inode number
+ */
+static inline void ino_key_init(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum)
+{
+ key->u32[0] = inum;
+ key->u32[1] = UBIFS_INO_KEY << UBIFS_S_KEY_BLOCK_BITS;
+}
+
+/**
+ * ino_key_init_flash - initialize on-flash inode key.
+ * @c: UBIFS file-system description object
+ * @k: key to initialize
+ * @inum: inode number
+ */
+static inline void ino_key_init_flash(const struct ubifs_info *c, void *k,
+ ino_t inum)
+{
+ union ubifs_key *key = k;
+
+ key->j32[0] = cpu_to_le32(inum);
+ key->j32[1] = cpu_to_le32(UBIFS_INO_KEY << UBIFS_S_KEY_BLOCK_BITS);
+ memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
+}
+
+/**
+ * lowest_ino_key - get the lowest possible inode key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: inode number
+ */
+static inline void lowest_ino_key(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum)
+{
+ key->u32[0] = inum;
+ key->u32[1] = 0;
+}
+
+/**
+ * highest_ino_key - get the highest possible inode key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: inode number
+ */
+static inline void highest_ino_key(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum)
+{
+ key->u32[0] = inum;
+ key->u32[1] = 0xffffffff;
+}
+
+/**
+ * dent_key_init - initialize directory entry key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: parent inode number
+ * @nm: direntry name and length
+ */
+static inline void dent_key_init(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum,
+ const struct qstr *nm)
+{
+ uint32_t hash = c->key_hash(nm->name, nm->len);
+
+ ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ key->u32[0] = inum;
+ key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS);
+}
+
+/**
+ * dent_key_init_hash - initialize a directory entry key without re-calculating
+ * the name hash.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: parent inode number
+ * @hash: direntry name hash
+ */
+static inline void dent_key_init_hash(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum,
+ uint32_t hash)
+{
+ ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ key->u32[0] = inum;
+ key->u32[1] = hash | (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS);
+}
+
+/**
+ * dent_key_init_flash - initialize on-flash directory entry key.
+ * @c: UBIFS file-system description object
+ * @k: key to initialize
+ * @inum: parent inode number
+ * @nm: direntry name and length
+ */
+static inline void dent_key_init_flash(const struct ubifs_info *c, void *k,
+ ino_t inum, const struct qstr *nm)
+{
+ union ubifs_key *key = k;
+ uint32_t hash = c->key_hash(nm->name, nm->len);
+
+ ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ key->j32[0] = cpu_to_le32(inum);
+ key->j32[1] = cpu_to_le32(hash |
+ (UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS));
+ memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
+}
+
+/**
+ * lowest_dent_key - get the lowest possible directory entry key.
+ * @c: UBIFS file-system description object
+ * @key: where to store the lowest key
+ * @inum: parent inode number
+ */
+static inline void lowest_dent_key(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum)
+{
+ key->u32[0] = inum;
+ key->u32[1] = UBIFS_DENT_KEY << UBIFS_S_KEY_HASH_BITS;
+}
+
+/**
+ * xent_key_init - initialize extended attribute entry key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: host inode number
+ * @nm: extended attribute entry name and length
+ */
+static inline void xent_key_init(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum,
+ const struct qstr *nm)
+{
+ uint32_t hash = c->key_hash(nm->name, nm->len);
+
+ ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ key->u32[0] = inum;
+ key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS);
+}
+
+/**
+ * xent_key_init_hash - initialize extended attribute entry key without
+ * re-calculating the name hash.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: host inode number
+ * @hash: extended attribute entry name hash
+ */
+static inline void xent_key_init_hash(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum,
+ uint32_t hash)
+{
+ ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ key->u32[0] = inum;
+ key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS);
+}
+
+/**
+ * xent_key_init_flash - initialize on-flash extended attribute entry key.
+ * @c: UBIFS file-system description object
+ * @k: key to initialize
+ * @inum: host inode number
+ * @nm: extended attribute entry name and length
+ */
+static inline void xent_key_init_flash(const struct ubifs_info *c, void *k,
+ ino_t inum, const struct qstr *nm)
+{
+ union ubifs_key *key = k;
+ uint32_t hash = c->key_hash(nm->name, nm->len);
+
+ ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
+ key->j32[0] = cpu_to_le32(inum);
+ key->j32[1] = cpu_to_le32(hash |
+ (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS));
+ memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
+}
+
+/**
+ * lowest_xent_key - get the lowest possible extended attribute entry key.
+ * @c: UBIFS file-system description object
+ * @key: where to store the lowest key
+ * @inum: host inode number
+ */
+static inline void lowest_xent_key(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum)
+{
+ key->u32[0] = inum;
+ key->u32[1] = UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS;
+}
+
+/**
+ * data_key_init - initialize data key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: inode number
+ * @block: block number
+ */
+static inline void data_key_init(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum,
+ unsigned int block)
+{
+ ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK));
+ key->u32[0] = inum;
+ key->u32[1] = block | (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS);
+}
+
+/**
+ * data_key_init_flash - initialize on-flash data key.
+ * @c: UBIFS file-system description object
+ * @k: key to initialize
+ * @inum: inode number
+ * @block: block number
+ */
+static inline void data_key_init_flash(const struct ubifs_info *c, void *k,
+ ino_t inum, unsigned int block)
+{
+ union ubifs_key *key = k;
+
+ ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK));
+ key->j32[0] = cpu_to_le32(inum);
+ key->j32[1] = cpu_to_le32(block |
+ (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS));
+ memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
+}
+
+/**
+ * trun_key_init - initialize truncation node key.
+ * @c: UBIFS file-system description object
+ * @key: key to initialize
+ * @inum: inode number
+ *
+ * Note, UBIFS does not have truncation keys on the media and this function is
+ * only used for purposes of replay.
+ */
+static inline void trun_key_init(const struct ubifs_info *c,
+ union ubifs_key *key, ino_t inum)
+{
+ key->u32[0] = inum;
+ key->u32[1] = UBIFS_TRUN_KEY << UBIFS_S_KEY_BLOCK_BITS;
+}
+
+/**
+ * key_type - get key type.
+ * @c: UBIFS file-system description object
+ * @key: key to get type of
+ */
+static inline int key_type(const struct ubifs_info *c,
+ const union ubifs_key *key)
+{
+ return key->u32[1] >> UBIFS_S_KEY_BLOCK_BITS;
+}
+
+/**
+ * key_type_flash - get type of an on-flash formatted key.
+ * @c: UBIFS file-system description object
+ * @k: key to get type of
+ */
+static inline int key_type_flash(const struct ubifs_info *c, const void *k)
+{
+ const union ubifs_key *key = k;
+
+ return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS;
+}
+
+/**
+ * key_inum - fetch inode number from key.
+ * @c: UBIFS file-system description object
+ * @k: key to fetch inode number from
+ */
+static inline ino_t key_inum(const struct ubifs_info *c, const void *k)
+{
+ const union ubifs_key *key = k;
+
+ return key->u32[0];
+}
+
+/**
+ * key_inum_flash - fetch inode number from an on-flash formatted key.
+ * @c: UBIFS file-system description object
+ * @k: key to fetch inode number from
+ */
+static inline ino_t key_inum_flash(const struct ubifs_info *c, const void *k)
+{
+ const union ubifs_key *key = k;
+
+ return le32_to_cpu(key->j32[0]);
+}
+
+/**
+ * key_hash - get directory entry hash.
+ * @c: UBIFS file-system description object
+ * @key: the key to get hash from
+ */
+static inline int key_hash(const struct ubifs_info *c,
+ const union ubifs_key *key)
+{
+ return key->u32[1] & UBIFS_S_KEY_HASH_MASK;
+}
+
+/**
+ * key_hash_flash - get directory entry hash from an on-flash formatted key.
+ * @c: UBIFS file-system description object
+ * @k: the key to get hash from
+ */
+static inline int key_hash_flash(const struct ubifs_info *c, const void *k)
+{
+ const union ubifs_key *key = k;
+
+ return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_HASH_MASK;
+}
+
+/**
+ * key_block - get data block number.
+ * @c: UBIFS file-system description object
+ * @key: the key to get the block number from
+ */
+static inline unsigned int key_block(const struct ubifs_info *c,
+ const union ubifs_key *key)
+{
+ return key->u32[1] & UBIFS_S_KEY_BLOCK_MASK;
+}
+
+/**
+ * key_block_flash - get data block number from an on-flash formatted key.
+ * @c: UBIFS file-system description object
+ * @k: the key to get the block number from
+ */
+static inline unsigned int key_block_flash(const struct ubifs_info *c,
+ const void *k)
+{
+ const union ubifs_key *key = k;
+
+ return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK;
+}
+
+/**
+ * key_read - transform a key to in-memory format.
+ * @c: UBIFS file-system description object
+ * @from: the key to transform
+ * @to: the key to store the result
+ */
+static inline void key_read(const struct ubifs_info *c, const void *from,
+ union ubifs_key *to)
+{
+ const union ubifs_key *f = from;
+
+ to->u32[0] = le32_to_cpu(f->j32[0]);
+ to->u32[1] = le32_to_cpu(f->j32[1]);
+}
+
+/**
+ * key_write - transform a key from in-memory format.
+ * @c: UBIFS file-system description object
+ * @from: the key to transform
+ * @to: the key to store the result
+ */
+static inline void key_write(const struct ubifs_info *c,
+ const union ubifs_key *from, void *to)
+{
+ union ubifs_key *t = to;
+
+ t->j32[0] = cpu_to_le32(from->u32[0]);
+ t->j32[1] = cpu_to_le32(from->u32[1]);
+ memset(to + 8, 0, UBIFS_MAX_KEY_LEN - 8);
+}
+
+/**
+ * key_write_idx - transform a key from in-memory format for the index.
+ * @c: UBIFS file-system description object
+ * @from: the key to transform
+ * @to: the key to store the result
+ */
+static inline void key_write_idx(const struct ubifs_info *c,
+ const union ubifs_key *from, void *to)
+{
+ union ubifs_key *t = to;
+
+ t->j32[0] = cpu_to_le32(from->u32[0]);
+ t->j32[1] = cpu_to_le32(from->u32[1]);
+}
+
+/**
+ * key_copy - copy a key.
+ * @c: UBIFS file-system description object
+ * @from: the key to copy from
+ * @to: the key to copy to
+ */
+static inline void key_copy(const struct ubifs_info *c,
+ const union ubifs_key *from, union ubifs_key *to)
+{
+ to->u64[0] = from->u64[0];
+}
+
+/**
+ * keys_cmp - compare keys.
+ * @c: UBIFS file-system description object
+ * @key1: the first key to compare
+ * @key2: the second key to compare
+ *
+ * This function compares 2 keys and returns %-1 if @key1 is less than
+ * @key2, %0 if the keys are equivalent and %1 if @key1 is greater than @key2.
+ */
+static inline int keys_cmp(const struct ubifs_info *c,
+ const union ubifs_key *key1,
+ const union ubifs_key *key2)
+{
+ if (key1->u32[0] < key2->u32[0])
+ return -1;
+ if (key1->u32[0] > key2->u32[0])
+ return 1;
+ if (key1->u32[1] < key2->u32[1])
+ return -1;
+ if (key1->u32[1] > key2->u32[1])
+ return 1;
+
+ return 0;
+}
+
+/**
+ * keys_eq - determine if keys are equivalent.
+ * @c: UBIFS file-system description object
+ * @key1: the first key to compare
+ * @key2: the second key to compare
+ *
+ * This function compares 2 keys and returns %1 if @key1 is equal to @key2 and
+ * %0 if not.
+ */
+static inline int keys_eq(const struct ubifs_info *c,
+ const union ubifs_key *key1,
+ const union ubifs_key *key2)
+{
+ if (key1->u32[0] != key2->u32[0])
+ return 0;
+ if (key1->u32[1] != key2->u32[1])
+ return 0;
+ return 1;
+}
+
+/**
+ * is_hash_key - is a key vulnerable to hash collisions.
+ * @c: UBIFS file-system description object
+ * @key: key
+ *
+ * This function returns %1 if @key is a hashed key or %0 otherwise.
+ */
+static inline int is_hash_key(const struct ubifs_info *c,
+ const union ubifs_key *key)
+{
+ int type = key_type(c, key);
+
+ return type == UBIFS_DENT_KEY || type == UBIFS_XENT_KEY;
+}
+
+/**
+ * key_max_inode_size - get maximum file size allowed by current key format.
+ * @c: UBIFS file-system description object
+ */
+static inline unsigned long long key_max_inode_size(const struct ubifs_info *c)
+{
+ switch (c->key_fmt) {
+ case UBIFS_SIMPLE_KEY_FMT:
+ return (1ULL << UBIFS_S_KEY_BLOCK_BITS) * UBIFS_BLOCK_SIZE;
+ default:
+ return 0;
+ }
+}
+#endif /* !__UBIFS_KEY_H__ */
diff --git a/qemu/roms/u-boot/fs/ubifs/log.c b/qemu/roms/u-boot/fs/ubifs/log.c
new file mode 100644
index 000000000..68a9bd98f
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/log.c
@@ -0,0 +1,104 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This file is a part of UBIFS journal implementation and contains various
+ * functions which manipulate the log. The log is a fixed area on the flash
+ * which does not contain any data but refers to buds. The log is a part of the
+ * journal.
+ */
+
+#include "ubifs.h"
+
+/**
+ * ubifs_search_bud - search bud LEB.
+ * @c: UBIFS file-system description object
+ * @lnum: logical eraseblock number to search
+ *
+ * This function searches bud LEB @lnum. Returns bud description object in case
+ * of success and %NULL if there is no bud with this LEB number.
+ */
+struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
+{
+ struct rb_node *p;
+ struct ubifs_bud *bud;
+
+ spin_lock(&c->buds_lock);
+ p = c->buds.rb_node;
+ while (p) {
+ bud = rb_entry(p, struct ubifs_bud, rb);
+ if (lnum < bud->lnum)
+ p = p->rb_left;
+ else if (lnum > bud->lnum)
+ p = p->rb_right;
+ else {
+ spin_unlock(&c->buds_lock);
+ return bud;
+ }
+ }
+ spin_unlock(&c->buds_lock);
+ return NULL;
+}
+
+/**
+ * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
+ * @c: UBIFS file-system description object
+ * @bud: the bud to add
+ */
+void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
+{
+ struct rb_node **p, *parent = NULL;
+ struct ubifs_bud *b;
+ struct ubifs_jhead *jhead;
+
+ spin_lock(&c->buds_lock);
+ p = &c->buds.rb_node;
+ while (*p) {
+ parent = *p;
+ b = rb_entry(parent, struct ubifs_bud, rb);
+ ubifs_assert(bud->lnum != b->lnum);
+ if (bud->lnum < b->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&bud->rb, parent, p);
+ rb_insert_color(&bud->rb, &c->buds);
+ if (c->jheads) {
+ jhead = &c->jheads[bud->jhead];
+ list_add_tail(&bud->list, &jhead->buds_list);
+ } else
+ ubifs_assert(c->replaying && (c->vfs_sb->s_flags & MS_RDONLY));
+
+ /*
+ * Note, although this is a new bud, we account its space now, before any
+ * data has been written to it, because this helps to guarantee a fixed
+ * mount time, and this bud will be read and scanned anyway.
+ */
+ c->bud_bytes += c->leb_size - bud->start;
+
+ dbg_log("LEB %d:%d, jhead %d, bud_bytes %lld", bud->lnum,
+ bud->start, bud->jhead, c->bud_bytes);
+ spin_unlock(&c->buds_lock);
+}
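+
+/*
+ * Example (illustrative, with a hypothetical 128KiB LEB): a bud whose journal
+ * head starts writing at offset 4096 contributes c->leb_size - bud->start =
+ * 131072 - 4096 = 126976 bytes to c->bud_bytes, even before any data has
+ * actually been written to it.
+ */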
diff --git a/qemu/roms/u-boot/fs/ubifs/lprops.c b/qemu/roms/u-boot/fs/ubifs/lprops.c
new file mode 100644
index 000000000..8ce4949fc
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/lprops.c
@@ -0,0 +1,842 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements the functions that access LEB properties and their
+ * categories. LEBs are categorized based on the needs of UBIFS, and the
+ * categories are stored as either heaps or lists to provide a fast way of
+ * finding a LEB in a particular category. For example, UBIFS may need to find
+ * an empty LEB for the journal, or a very dirty LEB for garbage collection.
+ */
+
+#include "ubifs.h"
+
+/**
+ * get_heap_comp_val - get the LEB properties value for heap comparisons.
+ * @lprops: LEB properties
+ * @cat: LEB category
+ */
+static int get_heap_comp_val(struct ubifs_lprops *lprops, int cat)
+{
+ switch (cat) {
+ case LPROPS_FREE:
+ return lprops->free;
+ case LPROPS_DIRTY_IDX:
+ return lprops->free + lprops->dirty;
+ default:
+ return lprops->dirty;
+ }
+}
+
+/**
+ * move_up_lpt_heap - move a new heap entry up as far as possible.
+ * @c: UBIFS file-system description object
+ * @heap: LEB category heap
+ * @lprops: LEB properties to move
+ * @cat: LEB category
+ *
+ * New entries to a heap are added at the bottom and then moved up until the
+ * parent's value is greater. In the case of LPT's category heaps, the value
+ * is either the amount of free space or the amount of dirty space, depending
+ * on the category.
+ */
+static void move_up_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap,
+ struct ubifs_lprops *lprops, int cat)
+{
+ int val1, val2, hpos;
+
+ hpos = lprops->hpos;
+ if (!hpos)
+ return; /* Already top of the heap */
+ val1 = get_heap_comp_val(lprops, cat);
+ /* Compare to parent and, if greater, move up the heap */
+ do {
+ int ppos = (hpos - 1) / 2;
+
+ val2 = get_heap_comp_val(heap->arr[ppos], cat);
+ if (val2 >= val1)
+ return;
+ /* Greater than parent so move up */
+ heap->arr[ppos]->hpos = hpos;
+ heap->arr[hpos] = heap->arr[ppos];
+ heap->arr[ppos] = lprops;
+ lprops->hpos = ppos;
+ hpos = ppos;
+ } while (hpos);
+}
+
+/**
+ * adjust_lpt_heap - move a changed heap entry up or down the heap.
+ * @c: UBIFS file-system description object
+ * @heap: LEB category heap
+ * @lprops: LEB properties to move
+ * @hpos: heap position of @lprops
+ * @cat: LEB category
+ *
+ * Changed entries in a heap are moved up or down until the parent's value is
+ * greater. In the case of LPT's category heaps, the value is either the amount
+ * of free space or the amount of dirty space, depending on the category.
+ */
+static void adjust_lpt_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap,
+ struct ubifs_lprops *lprops, int hpos, int cat)
+{
+ int val1, val2, val3, cpos;
+
+ val1 = get_heap_comp_val(lprops, cat);
+ /* Compare to parent and, if greater than parent, move up the heap */
+ if (hpos) {
+ int ppos = (hpos - 1) / 2;
+
+ val2 = get_heap_comp_val(heap->arr[ppos], cat);
+ if (val1 > val2) {
+ /* Greater than parent so move up */
+ while (1) {
+ heap->arr[ppos]->hpos = hpos;
+ heap->arr[hpos] = heap->arr[ppos];
+ heap->arr[ppos] = lprops;
+ lprops->hpos = ppos;
+ hpos = ppos;
+ if (!hpos)
+ return;
+ ppos = (hpos - 1) / 2;
+ val2 = get_heap_comp_val(heap->arr[ppos], cat);
+ if (val1 <= val2)
+ return;
+ /* Still greater than parent so keep going */
+ }
+ }
+ }
+
+ /* Not greater than parent, so compare to children */
+ while (1) {
+ /* Compare to left child */
+ cpos = hpos * 2 + 1;
+ if (cpos >= heap->cnt)
+ return;
+ val2 = get_heap_comp_val(heap->arr[cpos], cat);
+ if (val1 < val2) {
+ /* Less than left child, so promote biggest child */
+ if (cpos + 1 < heap->cnt) {
+ val3 = get_heap_comp_val(heap->arr[cpos + 1],
+ cat);
+ if (val3 > val2)
+ cpos += 1; /* Right child is bigger */
+ }
+ heap->arr[cpos]->hpos = hpos;
+ heap->arr[hpos] = heap->arr[cpos];
+ heap->arr[cpos] = lprops;
+ lprops->hpos = cpos;
+ hpos = cpos;
+ continue;
+ }
+ /* Compare to right child */
+ cpos += 1;
+ if (cpos >= heap->cnt)
+ return;
+ val3 = get_heap_comp_val(heap->arr[cpos], cat);
+ if (val1 < val3) {
+ /* Less than right child, so promote right child */
+ heap->arr[cpos]->hpos = hpos;
+ heap->arr[hpos] = heap->arr[cpos];
+ heap->arr[cpos] = lprops;
+ lprops->hpos = cpos;
+ hpos = cpos;
+ continue;
+ }
+ return;
+ }
+}
+
+/**
+ * add_to_lpt_heap - add LEB properties to a LEB category heap.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties to add
+ * @cat: LEB category
+ *
+ * This function returns %1 if @lprops is added to the heap for LEB category
+ * @cat, otherwise %0 is returned because the heap is full.
+ */
+static int add_to_lpt_heap(struct ubifs_info *c, struct ubifs_lprops *lprops,
+ int cat)
+{
+ struct ubifs_lpt_heap *heap = &c->lpt_heap[cat - 1];
+
+ if (heap->cnt >= heap->max_cnt) {
+ const int b = LPT_HEAP_SZ / 2 - 1;
+ int cpos, val1, val2;
+
+ /* Compare to some other LEB on the bottom of heap */
+ /* Pick a position kind of randomly */
+ cpos = (((size_t)lprops >> 4) & b) + b;
+ ubifs_assert(cpos >= b);
+ ubifs_assert(cpos < LPT_HEAP_SZ);
+ ubifs_assert(cpos < heap->cnt);
+
+ val1 = get_heap_comp_val(lprops, cat);
+ val2 = get_heap_comp_val(heap->arr[cpos], cat);
+ if (val1 > val2) {
+ struct ubifs_lprops *lp;
+
+ lp = heap->arr[cpos];
+ lp->flags &= ~LPROPS_CAT_MASK;
+ lp->flags |= LPROPS_UNCAT;
+ list_add(&lp->list, &c->uncat_list);
+ lprops->hpos = cpos;
+ heap->arr[cpos] = lprops;
+ move_up_lpt_heap(c, heap, lprops, cat);
+ dbg_check_heap(c, heap, cat, lprops->hpos);
+ return 1; /* Added to heap */
+ }
+ dbg_check_heap(c, heap, cat, -1);
+ return 0; /* Not added to heap */
+ } else {
+ lprops->hpos = heap->cnt++;
+ heap->arr[lprops->hpos] = lprops;
+ move_up_lpt_heap(c, heap, lprops, cat);
+ dbg_check_heap(c, heap, cat, lprops->hpos);
+ return 1; /* Added to heap */
+ }
+}
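+
+/*
+ * Note on the eviction position above (illustrative, assuming the usual value
+ * LPT_HEAP_SZ = 256): b = LPT_HEAP_SZ / 2 - 1 = 127, so
+ * cpos = (((size_t)lprops >> 4) & 127) + 127 always lands in the range
+ * 127..254, i.e. in the bottom half of a full heap.
+ */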
+
+/**
+ * remove_from_lpt_heap - remove LEB properties from a LEB category heap.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties to remove
+ * @cat: LEB category
+ */
+static void remove_from_lpt_heap(struct ubifs_info *c,
+ struct ubifs_lprops *lprops, int cat)
+{
+ struct ubifs_lpt_heap *heap;
+ int hpos = lprops->hpos;
+
+ heap = &c->lpt_heap[cat - 1];
+ ubifs_assert(hpos >= 0 && hpos < heap->cnt);
+ ubifs_assert(heap->arr[hpos] == lprops);
+ heap->cnt -= 1;
+ if (hpos < heap->cnt) {
+ heap->arr[hpos] = heap->arr[heap->cnt];
+ heap->arr[hpos]->hpos = hpos;
+ adjust_lpt_heap(c, heap, heap->arr[hpos], hpos, cat);
+ }
+ dbg_check_heap(c, heap, cat, -1);
+}
+
+/**
+ * lpt_heap_replace - replace lprops in a category heap.
+ * @c: UBIFS file-system description object
+ * @old_lprops: LEB properties to replace
+ * @new_lprops: LEB properties with which to replace
+ * @cat: LEB category
+ *
+ * During commit it is sometimes necessary to copy a pnode (see dirty_cow_pnode)
+ * and the lprops that the pnode contains. When that happens, references in
+ * the category heaps to those lprops must be updated to point to the new
+ * lprops. This function does that.
+ */
+static void lpt_heap_replace(struct ubifs_info *c,
+ struct ubifs_lprops *old_lprops,
+ struct ubifs_lprops *new_lprops, int cat)
+{
+ struct ubifs_lpt_heap *heap;
+ int hpos = new_lprops->hpos;
+
+ heap = &c->lpt_heap[cat - 1];
+ heap->arr[hpos] = new_lprops;
+}
+
+/**
+ * ubifs_add_to_cat - add LEB properties to a category list or heap.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties to add
+ * @cat: LEB category to which to add
+ *
+ * LEB properties are categorized to enable fast find operations.
+ */
+void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
+ int cat)
+{
+ switch (cat) {
+ case LPROPS_DIRTY:
+ case LPROPS_DIRTY_IDX:
+ case LPROPS_FREE:
+ if (add_to_lpt_heap(c, lprops, cat))
+ break;
+ /* No more room on heap so make it uncategorized */
+ cat = LPROPS_UNCAT;
+ /* Fall through */
+ case LPROPS_UNCAT:
+ list_add(&lprops->list, &c->uncat_list);
+ break;
+ case LPROPS_EMPTY:
+ list_add(&lprops->list, &c->empty_list);
+ break;
+ case LPROPS_FREEABLE:
+ list_add(&lprops->list, &c->freeable_list);
+ c->freeable_cnt += 1;
+ break;
+ case LPROPS_FRDI_IDX:
+ list_add(&lprops->list, &c->frdi_idx_list);
+ break;
+ default:
+ ubifs_assert(0);
+ }
+ lprops->flags &= ~LPROPS_CAT_MASK;
+ lprops->flags |= cat;
+}
+
+/**
+ * ubifs_remove_from_cat - remove LEB properties from a category list or heap.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties to remove
+ * @cat: LEB category from which to remove
+ *
+ * LEB properties are categorized to enable fast find operations.
+ */
+static void ubifs_remove_from_cat(struct ubifs_info *c,
+ struct ubifs_lprops *lprops, int cat)
+{
+ switch (cat) {
+ case LPROPS_DIRTY:
+ case LPROPS_DIRTY_IDX:
+ case LPROPS_FREE:
+ remove_from_lpt_heap(c, lprops, cat);
+ break;
+ case LPROPS_FREEABLE:
+ c->freeable_cnt -= 1;
+ ubifs_assert(c->freeable_cnt >= 0);
+ /* Fall through */
+ case LPROPS_UNCAT:
+ case LPROPS_EMPTY:
+ case LPROPS_FRDI_IDX:
+ ubifs_assert(!list_empty(&lprops->list));
+ list_del(&lprops->list);
+ break;
+ default:
+ ubifs_assert(0);
+ }
+}
+
+/**
+ * ubifs_replace_cat - replace lprops in a category list or heap.
+ * @c: UBIFS file-system description object
+ * @old_lprops: LEB properties to replace
+ * @new_lprops: LEB properties with which to replace
+ *
+ * During commit it is sometimes necessary to copy a pnode (see dirty_cow_pnode)
+ * and the lprops that the pnode contains. When that happens, references in
+ * category lists and heaps must be replaced. This function does that.
+ */
+void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
+ struct ubifs_lprops *new_lprops)
+{
+ int cat;
+
+ cat = new_lprops->flags & LPROPS_CAT_MASK;
+ switch (cat) {
+ case LPROPS_DIRTY:
+ case LPROPS_DIRTY_IDX:
+ case LPROPS_FREE:
+ lpt_heap_replace(c, old_lprops, new_lprops, cat);
+ break;
+ case LPROPS_UNCAT:
+ case LPROPS_EMPTY:
+ case LPROPS_FREEABLE:
+ case LPROPS_FRDI_IDX:
+ list_replace(&old_lprops->list, &new_lprops->list);
+ break;
+ default:
+ ubifs_assert(0);
+ }
+}
+
+/**
+ * ubifs_ensure_cat - ensure LEB properties are categorized.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties
+ *
+ * A LEB may have fallen off the bottom of a heap and ended up uncategorized
+ * even though it has enough space for us now. If that is the case, this
+ * function puts the LEB back onto a heap.
+ */
+void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops)
+{
+ int cat = lprops->flags & LPROPS_CAT_MASK;
+
+ if (cat != LPROPS_UNCAT)
+ return;
+ cat = ubifs_categorize_lprops(c, lprops);
+ if (cat == LPROPS_UNCAT)
+ return;
+ ubifs_remove_from_cat(c, lprops, LPROPS_UNCAT);
+ ubifs_add_to_cat(c, lprops, cat);
+}
+
+/**
+ * ubifs_categorize_lprops - categorize LEB properties.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties to categorize
+ *
+ * LEB properties are categorized to enable fast find operations. This function
+ * returns the LEB category to which the LEB properties belong. Note however
+ * that if the LEB category is stored as a heap and the heap is full, the
+ * LEB properties may have their category changed to %LPROPS_UNCAT.
+ */
+int ubifs_categorize_lprops(const struct ubifs_info *c,
+ const struct ubifs_lprops *lprops)
+{
+ if (lprops->flags & LPROPS_TAKEN)
+ return LPROPS_UNCAT;
+
+ if (lprops->free == c->leb_size) {
+ ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ return LPROPS_EMPTY;
+ }
+
+ if (lprops->free + lprops->dirty == c->leb_size) {
+ if (lprops->flags & LPROPS_INDEX)
+ return LPROPS_FRDI_IDX;
+ else
+ return LPROPS_FREEABLE;
+ }
+
+ if (lprops->flags & LPROPS_INDEX) {
+ if (lprops->dirty + lprops->free >= c->min_idx_node_sz)
+ return LPROPS_DIRTY_IDX;
+ } else {
+ if (lprops->dirty >= c->dead_wm &&
+ lprops->dirty > lprops->free)
+ return LPROPS_DIRTY;
+ if (lprops->free > 0)
+ return LPROPS_FREE;
+ }
+
+ return LPROPS_UNCAT;
+}
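+
+/*
+ * Example (illustrative, hypothetical numbers, none of the LEBs taken): with
+ * c->leb_size = 131072 and c->dead_wm = 2048, a non-index LEB with free = 0
+ * and dirty = 100000 is categorized as %LPROPS_DIRTY (dirty >= dead_wm and
+ * dirty > free), one with free = 131072 is %LPROPS_EMPTY, and one with
+ * free + dirty = 131072 (but not entirely free) is %LPROPS_FREEABLE.
+ */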
+
+/**
+ * change_category - change LEB properties category.
+ * @c: UBIFS file-system description object
+ * @lprops: LEB properties to recategorize
+ *
+ * LEB properties are categorized to enable fast find operations. When the LEB
+ * properties change they must be recategorized.
+ */
+static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops)
+{
+ int old_cat = lprops->flags & LPROPS_CAT_MASK;
+ int new_cat = ubifs_categorize_lprops(c, lprops);
+
+ if (old_cat == new_cat) {
+ struct ubifs_lpt_heap *heap = &c->lpt_heap[new_cat - 1];
+
+ /* lprops on a heap now must be moved up or down */
+ if (new_cat < 1 || new_cat > LPROPS_HEAP_CNT)
+ return; /* Not on a heap */
+ heap = &c->lpt_heap[new_cat - 1];
+ adjust_lpt_heap(c, heap, lprops, lprops->hpos, new_cat);
+ } else {
+ ubifs_remove_from_cat(c, lprops, old_cat);
+ ubifs_add_to_cat(c, lprops, new_cat);
+ }
+}
+
+/**
+ * calc_dark - calculate LEB dark space size.
+ * @c: the UBIFS file-system description object
+ * @spc: amount of free and dirty space in the LEB
+ *
+ * This function calculates the amount of dark space in an LEB which has @spc
+ * bytes of free and dirty space. Returns the calculated result.
+ *
+ * Dark space is space which is not always usable - it depends on which nodes
+ * are written in which order. E.g., if an LEB has only 512 free bytes, they
+ * are dark space, because they cannot fit a large data node. So UBIFS cannot
+ * count on these 512 bytes being usable, because that would not be true if,
+ * for example, only big chunks of incompressible data are written to the FS.
+ */
+static int calc_dark(struct ubifs_info *c, int spc)
+{
+ ubifs_assert(!(spc & 7));
+
+ if (spc < c->dark_wm)
+ return spc;
+
+ /*
+ * If we have slightly more space than the dark space watermark, we can
+ * still safely assume we will be able to write a node of the smallest
+ * size there.
+ */
+ if (spc - c->dark_wm < MIN_WRITE_SZ)
+ return spc - MIN_WRITE_SZ;
+
+ return c->dark_wm;
+}
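+
+/*
+ * Worked example (illustrative, with hypothetical c->dark_wm = 8192 and
+ * MIN_WRITE_SZ = 512): spc = 4096 returns 4096 (all of it is dark);
+ * spc = 8448 returns 8448 - 512 = 7936, because 256 bytes above the watermark
+ * is too little to rely on; spc = 16384 returns 8192, the watermark itself.
+ */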
+
+/**
+ * is_lprops_dirty - determine if LEB properties are dirty.
+ * @c: the UBIFS file-system description object
+ * @lprops: LEB properties to test
+ */
+static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops)
+{
+ struct ubifs_pnode *pnode;
+ int pos;
+
+ pos = (lprops->lnum - c->main_first) & (UBIFS_LPT_FANOUT - 1);
+ pnode = (struct ubifs_pnode *)container_of(lprops - pos,
+ struct ubifs_pnode,
+ lprops[0]);
+ return !test_bit(COW_ZNODE, &pnode->flags) &&
+ test_bit(DIRTY_CNODE, &pnode->flags);
+}
+
+/**
+ * ubifs_change_lp - change LEB properties.
+ * @c: the UBIFS file-system description object
+ * @lp: LEB properties to change
+ * @free: new free space amount
+ * @dirty: new dirty space amount
+ * @flags: new flags
+ * @idx_gc_cnt: change to the count of idx_gc list
+ *
+ * This function changes LEB properties (@free, @dirty or @flag). However, the
+ * property which has the %LPROPS_NC value is not changed. Returns a pointer to
+ * the updated LEB properties on success and a negative error code on failure.
+ *
+ * Note, the LEB properties may have had to be copied (due to COW) and
+ * consequently the pointer returned may not be the same as the pointer
+ * passed.
+ */
+const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
+ const struct ubifs_lprops *lp,
+ int free, int dirty, int flags,
+ int idx_gc_cnt)
+{
+ /*
+ * This is the only function that is allowed to change lprops, so we
+ * discard the const qualifier.
+ */
+ struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp;
+
+ dbg_lp("LEB %d, free %d, dirty %d, flags %d",
+ lprops->lnum, free, dirty, flags);
+
+ ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c->lst.empty_lebs >= 0 &&
+ c->lst.empty_lebs <= c->main_lebs);
+ ubifs_assert(c->freeable_cnt >= 0);
+ ubifs_assert(c->freeable_cnt <= c->main_lebs);
+ ubifs_assert(c->lst.taken_empty_lebs >= 0);
+ ubifs_assert(c->lst.taken_empty_lebs <= c->lst.empty_lebs);
+ ubifs_assert(!(c->lst.total_free & 7) && !(c->lst.total_dirty & 7));
+ ubifs_assert(!(c->lst.total_dead & 7) && !(c->lst.total_dark & 7));
+ ubifs_assert(!(c->lst.total_used & 7));
+ ubifs_assert(free == LPROPS_NC || free >= 0);
+ ubifs_assert(dirty == LPROPS_NC || dirty >= 0);
+
+ if (!is_lprops_dirty(c, lprops)) {
+ lprops = ubifs_lpt_lookup_dirty(c, lprops->lnum);
+ if (IS_ERR(lprops))
+ return lprops;
+ } else
+ ubifs_assert(lprops == ubifs_lpt_lookup_dirty(c, lprops->lnum));
+
+ ubifs_assert(!(lprops->free & 7) && !(lprops->dirty & 7));
+
+ spin_lock(&c->space_lock);
+ if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size)
+ c->lst.taken_empty_lebs -= 1;
+
+ if (!(lprops->flags & LPROPS_INDEX)) {
+ int old_spc;
+
+ old_spc = lprops->free + lprops->dirty;
+ if (old_spc < c->dead_wm)
+ c->lst.total_dead -= old_spc;
+ else
+ c->lst.total_dark -= calc_dark(c, old_spc);
+
+ c->lst.total_used -= c->leb_size - old_spc;
+ }
+
+ if (free != LPROPS_NC) {
+ free = ALIGN(free, 8);
+ c->lst.total_free += free - lprops->free;
+
+ /* Increase or decrease empty LEBs counter if needed */
+ if (free == c->leb_size) {
+ if (lprops->free != c->leb_size)
+ c->lst.empty_lebs += 1;
+ } else if (lprops->free == c->leb_size)
+ c->lst.empty_lebs -= 1;
+ lprops->free = free;
+ }
+
+ if (dirty != LPROPS_NC) {
+ dirty = ALIGN(dirty, 8);
+ c->lst.total_dirty += dirty - lprops->dirty;
+ lprops->dirty = dirty;
+ }
+
+ if (flags != LPROPS_NC) {
+ /* Take care of the indexing LEBs counter if needed */
+ if ((lprops->flags & LPROPS_INDEX)) {
+ if (!(flags & LPROPS_INDEX))
+ c->lst.idx_lebs -= 1;
+ } else if (flags & LPROPS_INDEX)
+ c->lst.idx_lebs += 1;
+ lprops->flags = flags;
+ }
+
+ if (!(lprops->flags & LPROPS_INDEX)) {
+ int new_spc;
+
+ new_spc = lprops->free + lprops->dirty;
+ if (new_spc < c->dead_wm)
+ c->lst.total_dead += new_spc;
+ else
+ c->lst.total_dark += calc_dark(c, new_spc);
+
+ c->lst.total_used += c->leb_size - new_spc;
+ }
+
+ if ((lprops->flags & LPROPS_TAKEN) && lprops->free == c->leb_size)
+ c->lst.taken_empty_lebs += 1;
+
+ change_category(c, lprops);
+ c->idx_gc_cnt += idx_gc_cnt;
+ spin_unlock(&c->space_lock);
+ return lprops;
+}
+
+/**
+ * ubifs_get_lp_stats - get lprops statistics.
+ * @c: UBIFS file-system description object
+ * @lst: where to store the statistics
+ */
+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst)
+{
+ spin_lock(&c->space_lock);
+ memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats));
+ spin_unlock(&c->space_lock);
+}
+
+/**
+ * ubifs_change_one_lp - change LEB properties.
+ * @c: the UBIFS file-system description object
+ * @lnum: LEB to change properties for
+ * @free: amount of free space
+ * @dirty: amount of dirty space
+ * @flags_set: flags to set
+ * @flags_clean: flags to clean
+ * @idx_gc_cnt: change to the count of idx_gc list
+ *
+ * This function changes properties of LEB @lnum. It is a helper wrapper over
+ * 'ubifs_change_lp()' which hides lprops get/release. The arguments are the
+ * same as in case of 'ubifs_change_lp()'. Returns zero in case of success and
+ * a negative error code in case of failure.
+ */
+int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
+ int flags_set, int flags_clean, int idx_gc_cnt)
+{
+ int err = 0, flags;
+ const struct ubifs_lprops *lp;
+
+ ubifs_get_lprops(c);
+
+ lp = ubifs_lpt_lookup_dirty(c, lnum);
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out;
+ }
+
+ flags = (lp->flags | flags_set) & ~flags_clean;
+ lp = ubifs_change_lp(c, lp, free, dirty, flags, idx_gc_cnt);
+ if (IS_ERR(lp))
+ err = PTR_ERR(lp);
+
+out:
+ ubifs_release_lprops(c);
+ return err;
+}
+
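+/*
+ * A minimal usage sketch for 'ubifs_change_one_lp()' (hypothetical helper,
+ * not part of the original code): mark LEB @lnum as taken while leaving its
+ * free and dirty space untouched by passing %LPROPS_NC for both space
+ * arguments.
+ */
+static int example_take_leb(struct ubifs_info *c, int lnum)
+{
+ return ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC,
+ LPROPS_TAKEN, 0, 0);
+}
+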
+/**
+ * ubifs_update_one_lp - update LEB properties.
+ * @c: the UBIFS file-system description object
+ * @lnum: LEB to change properties for
+ * @free: amount of free space
+ * @dirty: amount of dirty space to add
+ * @flags_set: flags to set
+ * @flags_clean: flags to clean
+ *
+ * This function is the same as 'ubifs_change_one_lp()' but @dirty is added to
+ * the current dirty space rather than replacing it.
+ */
+int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
+ int flags_set, int flags_clean)
+{
+ int err = 0, flags;
+ const struct ubifs_lprops *lp;
+
+ ubifs_get_lprops(c);
+
+ lp = ubifs_lpt_lookup_dirty(c, lnum);
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out;
+ }
+
+ flags = (lp->flags | flags_set) & ~flags_clean;
+ lp = ubifs_change_lp(c, lp, free, lp->dirty + dirty, flags, 0);
+ if (IS_ERR(lp))
+ err = PTR_ERR(lp);
+
+out:
+ ubifs_release_lprops(c);
+ return err;
+}
+
+/**
+ * ubifs_read_one_lp - read LEB properties.
+ * @c: the UBIFS file-system description object
+ * @lnum: LEB to read properties for
+ * @lp: where to store read properties
+ *
+ * This helper function reads properties of a LEB @lnum and stores them in @lp.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp)
+{
+ int err = 0;
+ const struct ubifs_lprops *lpp;
+
+ ubifs_get_lprops(c);
+
+ lpp = ubifs_lpt_lookup(c, lnum);
+ if (IS_ERR(lpp)) {
+ err = PTR_ERR(lpp);
+ goto out;
+ }
+
+ memcpy(lp, lpp, sizeof(struct ubifs_lprops));
+
+out:
+ ubifs_release_lprops(c);
+ return err;
+}
+
+/**
+ * ubifs_fast_find_free - try to find a LEB with free space quickly.
+ * @c: the UBIFS file-system description object
+ *
+ * This function returns LEB properties for a LEB with free space or %NULL if
+ * the function is unable to find a LEB quickly.
+ */
+const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c)
+{
+ struct ubifs_lprops *lprops;
+ struct ubifs_lpt_heap *heap;
+
+ ubifs_assert(mutex_is_locked(&c->lp_mutex));
+
+ heap = &c->lpt_heap[LPROPS_FREE - 1];
+ if (heap->cnt == 0)
+ return NULL;
+
+ lprops = heap->arr[0];
+ ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ return lprops;
+}
+
+/**
+ * ubifs_fast_find_empty - try to find an empty LEB quickly.
+ * @c: the UBIFS file-system description object
+ *
+ * This function returns LEB properties for an empty LEB or %NULL if the
+ * function is unable to find an empty LEB quickly.
+ */
+const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c)
+{
+ struct ubifs_lprops *lprops;
+
+ ubifs_assert(mutex_is_locked(&c->lp_mutex));
+
+ if (list_empty(&c->empty_list))
+ return NULL;
+
+ lprops = list_entry(c->empty_list.next, struct ubifs_lprops, list);
+ ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(lprops->free == c->leb_size);
+ return lprops;
+}
+
+/**
+ * ubifs_fast_find_freeable - try to find a freeable LEB quickly.
+ * @c: the UBIFS file-system description object
+ *
+ * This function returns LEB properties for a freeable LEB or %NULL if the
+ * function is unable to find a freeable LEB quickly.
+ */
+const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c)
+{
+ struct ubifs_lprops *lprops;
+
+ ubifs_assert(mutex_is_locked(&c->lp_mutex));
+
+ if (list_empty(&c->freeable_list))
+ return NULL;
+
+ lprops = list_entry(c->freeable_list.next, struct ubifs_lprops, list);
+ ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert(!(lprops->flags & LPROPS_INDEX));
+ ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
+ ubifs_assert(c->freeable_cnt > 0);
+ return lprops;
+}
+
+/**
+ * ubifs_fast_find_frdi_idx - try to find a freeable index LEB quickly.
+ * @c: the UBIFS file-system description object
+ *
+ * This function returns LEB properties for a freeable index LEB or %NULL if the
+ * function is unable to find a freeable index LEB quickly.
+ */
+const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c)
+{
+ struct ubifs_lprops *lprops;
+
+ ubifs_assert(mutex_is_locked(&c->lp_mutex));
+
+ if (list_empty(&c->frdi_idx_list))
+ return NULL;
+
+ lprops = list_entry(c->frdi_idx_list.next, struct ubifs_lprops, list);
+ ubifs_assert(!(lprops->flags & LPROPS_TAKEN));
+ ubifs_assert((lprops->flags & LPROPS_INDEX));
+ ubifs_assert(lprops->free + lprops->dirty == c->leb_size);
+ return lprops;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/lpt.c b/qemu/roms/u-boot/fs/ubifs/lpt.c
new file mode 100644
index 000000000..1a50d4cc2
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/lpt.c
@@ -0,0 +1,1105 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements the LEB properties tree (LPT) area. The LPT area
+ * contains the LEB properties tree, a table of LPT area eraseblocks (ltab), and
+ * (for the "big" model) a table of saved LEB numbers (lsave). The LPT area sits
+ * between the log and the orphan area.
+ *
+ * The LPT area is like a miniature self-contained file system. It is required
+ * that it never runs out of space, is fast to access and update, and scales
+ * logarithmically. The LEB properties tree is implemented as a wandering tree
+ * much like the TNC, and the LPT area has its own garbage collection.
+ *
+ * The LPT has two slightly different forms called the "small model" and the
+ * "big model". The small model is used when the entire LEB properties table
+ * can be written into a single eraseblock. In that case, garbage collection
+ * consists of just writing the whole table, which therefore makes all other
+ * eraseblocks reusable. In the case of the big model, dirty eraseblocks are
+ * selected for garbage collection, which consists of marking the clean nodes in
+ * that LEB as dirty, and then only the dirty nodes are written out. Also, in
+ * the case of the big model, a table of LEB numbers is saved so that the entire
+ * LPT does not have to be scanned looking for empty eraseblocks when UBIFS is
+ * first mounted.
+ */
+
+#include "ubifs.h"
+#include "crc16.h"
+#include <linux/math64.h>
+
+/**
+ * do_calc_lpt_geom - calculate sizes for the LPT area.
+ * @c: the UBIFS file-system description object
+ *
+ * Calculate the sizes of LPT bit fields, nodes, and tree, based on the
+ * properties of the flash and whether LPT is "big" (c->big_lpt).
+ */
+static void do_calc_lpt_geom(struct ubifs_info *c)
+{
+ int i, n, bits, per_leb_wastage, max_pnode_cnt;
+ long long sz, tot_wastage;
+
+ n = c->main_lebs + c->max_leb_cnt - c->leb_cnt;
+ max_pnode_cnt = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT);
+
+ c->lpt_hght = 1;
+ n = UBIFS_LPT_FANOUT;
+ while (n < max_pnode_cnt) {
+ c->lpt_hght += 1;
+ n <<= UBIFS_LPT_FANOUT_SHIFT;
+ }
+
+ c->pnode_cnt = DIV_ROUND_UP(c->main_lebs, UBIFS_LPT_FANOUT);
+
+ n = DIV_ROUND_UP(c->pnode_cnt, UBIFS_LPT_FANOUT);
+ c->nnode_cnt = n;
+ for (i = 1; i < c->lpt_hght; i++) {
+ n = DIV_ROUND_UP(n, UBIFS_LPT_FANOUT);
+ c->nnode_cnt += n;
+ }
+
+ c->space_bits = fls(c->leb_size) - 3;
+ c->lpt_lnum_bits = fls(c->lpt_lebs);
+ c->lpt_offs_bits = fls(c->leb_size - 1);
+ c->lpt_spc_bits = fls(c->leb_size);
+
+ n = DIV_ROUND_UP(c->max_leb_cnt, UBIFS_LPT_FANOUT);
+ c->pcnt_bits = fls(n - 1);
+
+ c->lnum_bits = fls(c->max_leb_cnt - 1);
+
+ bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
+ (c->big_lpt ? c->pcnt_bits : 0) +
+ (c->space_bits * 2 + 1) * UBIFS_LPT_FANOUT;
+ c->pnode_sz = (bits + 7) / 8;
+
+ bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
+ (c->big_lpt ? c->pcnt_bits : 0) +
+ (c->lpt_lnum_bits + c->lpt_offs_bits) * UBIFS_LPT_FANOUT;
+ c->nnode_sz = (bits + 7) / 8;
+
+ bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
+ c->lpt_lebs * c->lpt_spc_bits * 2;
+ c->ltab_sz = (bits + 7) / 8;
+
+ bits = UBIFS_LPT_CRC_BITS + UBIFS_LPT_TYPE_BITS +
+ c->lnum_bits * c->lsave_cnt;
+ c->lsave_sz = (bits + 7) / 8;
+
+ /* Calculate the minimum LPT size */
+ c->lpt_sz = (long long)c->pnode_cnt * c->pnode_sz;
+ c->lpt_sz += (long long)c->nnode_cnt * c->nnode_sz;
+ c->lpt_sz += c->ltab_sz;
+ if (c->big_lpt)
+ c->lpt_sz += c->lsave_sz;
+
+ /* Add wastage */
+ sz = c->lpt_sz;
+ per_leb_wastage = max_t(int, c->pnode_sz, c->nnode_sz);
+ sz += per_leb_wastage;
+ tot_wastage = per_leb_wastage;
+ while (sz > c->leb_size) {
+ sz += per_leb_wastage;
+ sz -= c->leb_size;
+ tot_wastage += per_leb_wastage;
+ }
+ tot_wastage += ALIGN(sz, c->min_io_size) - sz;
+ c->lpt_sz += tot_wastage;
+}
+
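+/*
+ * A worked example of the pnode size calculation above (illustrative only,
+ * assuming the usual constants UBIFS_LPT_CRC_BITS == 16,
+ * UBIFS_LPT_TYPE_BITS == 4 and UBIFS_LPT_FANOUT == 4): with a 128KiB LEB,
+ * space_bits = fls(131072) - 3 = 15. For the small model (no pcnt_bits) a
+ * pnode then packs 16 + 4 + (15 * 2 + 1) * 4 = 144 bits, so pnode_sz is
+ * 144 / 8 = 18 bytes.
+ */
+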
+/**
+ * ubifs_calc_lpt_geom - calculate and check sizes for the LPT area.
+ * @c: the UBIFS file-system description object
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_calc_lpt_geom(struct ubifs_info *c)
+{
+ int lebs_needed;
+ long long sz;
+
+ do_calc_lpt_geom(c);
+
+ /* Verify that lpt_lebs is big enough */
+ sz = c->lpt_sz * 2; /* Must have at least 2 times the size */
+ lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size);
+ if (lebs_needed > c->lpt_lebs) {
+ ubifs_err("too few LPT LEBs");
+ return -EINVAL;
+ }
+
+ /* Verify that ltab fits in a single LEB (since ltab is a single node) */
+ if (c->ltab_sz > c->leb_size) {
+ ubifs_err("LPT ltab too big");
+ return -EINVAL;
+ }
+
+ c->check_lpt_free = c->big_lpt;
+ return 0;
+}
+
+/**
+ * ubifs_unpack_bits - unpack bit fields.
+ * @addr: address at which to unpack (passed and next address returned)
+ * @pos: bit position at which to unpack (passed and next position returned)
+ * @nrbits: number of bits of value to unpack (1-32)
+ *
+ * This function returns the unpacked value.
+ */
+uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits)
+{
+ const int k = 32 - nrbits;
+ uint8_t *p = *addr;
+ int b = *pos;
+ uint32_t uninitialized_var(val);
+ const int bytes = (nrbits + b + 7) >> 3;
+
+ ubifs_assert(nrbits > 0);
+ ubifs_assert(nrbits <= 32);
+ ubifs_assert(*pos >= 0);
+ ubifs_assert(*pos < 8);
+ if (b) {
+ switch (bytes) {
+ case 2:
+ val = p[1];
+ break;
+ case 3:
+ val = p[1] | ((uint32_t)p[2] << 8);
+ break;
+ case 4:
+ val = p[1] | ((uint32_t)p[2] << 8) |
+ ((uint32_t)p[3] << 16);
+ break;
+ case 5:
+ val = p[1] | ((uint32_t)p[2] << 8) |
+ ((uint32_t)p[3] << 16) |
+ ((uint32_t)p[4] << 24);
+ }
+ val <<= (8 - b);
+ val |= *p >> b;
+ nrbits += b;
+ } else {
+ switch (bytes) {
+ case 1:
+ val = p[0];
+ break;
+ case 2:
+ val = p[0] | ((uint32_t)p[1] << 8);
+ break;
+ case 3:
+ val = p[0] | ((uint32_t)p[1] << 8) |
+ ((uint32_t)p[2] << 16);
+ break;
+ case 4:
+ val = p[0] | ((uint32_t)p[1] << 8) |
+ ((uint32_t)p[2] << 16) |
+ ((uint32_t)p[3] << 24);
+ break;
+ }
+ }
+ val <<= k;
+ val >>= k;
+ b = nrbits & 7;
+ p += nrbits >> 3;
+ *addr = p;
+ *pos = b;
+ ubifs_assert((val >> nrbits) == 0 || nrbits - b == 32);
+ return val;
+}
+
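+/*
+ * A minimal usage sketch for 'ubifs_unpack_bits()' (hypothetical helper, not
+ * part of the original code): peek at the type of a packed LPT node. Because
+ * @addr and @pos are advanced by every call, consecutive calls walk the bit
+ * stream, which is how the unpack routines below consume packed nodes.
+ */
+static int example_peek_lpt_type(void *buf)
+{
+ uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
+ int pos = 0;
+
+ return ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_TYPE_BITS);
+}
+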
+/**
+ * ubifs_add_lpt_dirt - add dirty space to LPT LEB properties.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to which to add dirty space
+ * @dirty: amount of dirty space to add
+ */
+void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty)
+{
+ if (!dirty || !lnum)
+ return;
+ dbg_lp("LEB %d add %d to %d",
+ lnum, dirty, c->ltab[lnum - c->lpt_first].dirty);
+ ubifs_assert(lnum >= c->lpt_first && lnum <= c->lpt_last);
+ c->ltab[lnum - c->lpt_first].dirty += dirty;
+}
+
+/**
+ * ubifs_add_nnode_dirt - add dirty space to LPT LEB properties.
+ * @c: UBIFS file-system description object
+ * @nnode: nnode for which to add dirt
+ */
+void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode)
+{
+ struct ubifs_nnode *np = nnode->parent;
+
+ if (np)
+ ubifs_add_lpt_dirt(c, np->nbranch[nnode->iip].lnum,
+ c->nnode_sz);
+ else {
+ ubifs_add_lpt_dirt(c, c->lpt_lnum, c->nnode_sz);
+ if (!(c->lpt_drty_flgs & LTAB_DIRTY)) {
+ c->lpt_drty_flgs |= LTAB_DIRTY;
+ ubifs_add_lpt_dirt(c, c->ltab_lnum, c->ltab_sz);
+ }
+ }
+}
+
+/**
+ * add_pnode_dirt - add dirty space to LPT LEB properties.
+ * @c: UBIFS file-system description object
+ * @pnode: pnode for which to add dirt
+ */
+static void add_pnode_dirt(struct ubifs_info *c, struct ubifs_pnode *pnode)
+{
+ ubifs_add_lpt_dirt(c, pnode->parent->nbranch[pnode->iip].lnum,
+ c->pnode_sz);
+}
+
+/**
+ * calc_nnode_num_from_parent - calculate nnode number.
+ * @c: UBIFS file-system description object
+ * @parent: parent nnode
+ * @iip: index in parent
+ *
+ * The nnode number is a number that uniquely identifies a nnode and can be used
+ * easily to traverse the tree from the root to that nnode.
+ *
+ * This function calculates and returns the nnode number based on the parent's
+ * nnode number and the index in parent.
+ */
+static int calc_nnode_num_from_parent(const struct ubifs_info *c,
+ struct ubifs_nnode *parent, int iip)
+{
+ int num, shft;
+
+ if (!parent)
+ return 1;
+ shft = (c->lpt_hght - parent->level) * UBIFS_LPT_FANOUT_SHIFT;
+ num = parent->num ^ (1 << shft);
+ num |= (UBIFS_LPT_FANOUT + iip) << shft;
+ return num;
+}
+
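+/*
+ * Worked example (illustrative, assuming the usual fanout of 4): the root
+ * nnode gets number 1. For its child at @iip == 2, shft is 0, so
+ * num = (1 ^ 1) | (4 + 2) = 6. Each (UBIFS_LPT_FANOUT + iip) digit encodes
+ * one step of the path from the root, one fanout-sized digit per level.
+ */
+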
+/**
+ * calc_pnode_num_from_parent - calculate pnode number.
+ * @c: UBIFS file-system description object
+ * @parent: parent nnode
+ * @iip: index in parent
+ *
+ * The pnode number is a number that uniquely identifies a pnode and can be used
+ * easily to traverse the tree from the root to that pnode.
+ *
+ * This function calculates and returns the pnode number based on the parent's
+ * nnode number and the index in parent.
+ */
+static int calc_pnode_num_from_parent(const struct ubifs_info *c,
+ struct ubifs_nnode *parent, int iip)
+{
+ int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0;
+
+ for (i = 0; i < n; i++) {
+ num <<= UBIFS_LPT_FANOUT_SHIFT;
+ num |= pnum & (UBIFS_LPT_FANOUT - 1);
+ pnum >>= UBIFS_LPT_FANOUT_SHIFT;
+ }
+ num <<= UBIFS_LPT_FANOUT_SHIFT;
+ num |= iip;
+ return num;
+}
+
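+/*
+ * Worked example (illustrative, assuming fanout 4 and lpt_hght == 2): for a
+ * parent nnode with num == 6 (the root's child at iip 2 in the example
+ * above), the single loop iteration extracts the digit 6 & 3 == 2, so the
+ * pnode at @iip == 1 gets num = (2 << 2) | 1 == 9, i.e. pnode index 9 in
+ * left-to-right order.
+ */
+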
+/**
+ * update_cats - add LEB properties of a pnode to LEB category lists and heaps.
+ * @c: UBIFS file-system description object
+ * @pnode: pnode
+ *
+ * When a pnode is loaded into memory, the LEB properties it contains are added,
+ * by this function, to the LEB category lists and heaps.
+ */
+static void update_cats(struct ubifs_info *c, struct ubifs_pnode *pnode)
+{
+ int i;
+
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ int cat = pnode->lprops[i].flags & LPROPS_CAT_MASK;
+ int lnum = pnode->lprops[i].lnum;
+
+ if (!lnum)
+ return;
+ ubifs_add_to_cat(c, &pnode->lprops[i], cat);
+ }
+}
+
+/**
+ * replace_cats - replace pnode references in LEB category lists and heaps.
+ * @c: UBIFS file-system description object
+ * @old_pnode: pnode copied
+ * @new_pnode: pnode copy
+ *
+ * During commit it is sometimes necessary to copy a pnode
+ * (see dirty_cow_pnode). When that happens, references in
+ * category lists and heaps must be replaced. This function does that.
+ */
+static void replace_cats(struct ubifs_info *c, struct ubifs_pnode *old_pnode,
+ struct ubifs_pnode *new_pnode)
+{
+ int i;
+
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ if (!new_pnode->lprops[i].lnum)
+ return;
+ ubifs_replace_cat(c, &old_pnode->lprops[i],
+ &new_pnode->lprops[i]);
+ }
+}
+
+/**
+ * check_lpt_crc - check LPT node crc is correct.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing node
+ * @len: length of node
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int check_lpt_crc(void *buf, int len)
+{
+ int pos = 0;
+ uint8_t *addr = buf;
+ uint16_t crc, calc_crc;
+
+ crc = ubifs_unpack_bits(&addr, &pos, UBIFS_LPT_CRC_BITS);
+ calc_crc = crc16(-1, buf + UBIFS_LPT_CRC_BYTES,
+ len - UBIFS_LPT_CRC_BYTES);
+ if (crc != calc_crc) {
+ ubifs_err("invalid crc in LPT node: crc %hx calc %hx", crc,
+ calc_crc);
+ dbg_dump_stack();
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * check_lpt_type - check LPT node type is correct.
+ * @addr: address of type bit field is passed and returned updated here
+ * @pos: position of type bit field is passed and returned updated here
+ * @type: expected type
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int check_lpt_type(uint8_t **addr, int *pos, int type)
+{
+ int node_type;
+
+ node_type = ubifs_unpack_bits(addr, pos, UBIFS_LPT_TYPE_BITS);
+ if (node_type != type) {
+ ubifs_err("invalid type (%d) in LPT node type %d", node_type,
+ type);
+ dbg_dump_stack();
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * unpack_pnode - unpack a pnode.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing packed pnode to unpack
+ * @pnode: pnode structure to fill
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int unpack_pnode(const struct ubifs_info *c, void *buf,
+ struct ubifs_pnode *pnode)
+{
+ uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
+ int i, pos = 0, err;
+
+ err = check_lpt_type(&addr, &pos, UBIFS_LPT_PNODE);
+ if (err)
+ return err;
+ if (c->big_lpt)
+ pnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ struct ubifs_lprops * const lprops = &pnode->lprops[i];
+
+ lprops->free = ubifs_unpack_bits(&addr, &pos, c->space_bits);
+ lprops->free <<= 3;
+ lprops->dirty = ubifs_unpack_bits(&addr, &pos, c->space_bits);
+ lprops->dirty <<= 3;
+
+ if (ubifs_unpack_bits(&addr, &pos, 1))
+ lprops->flags = LPROPS_INDEX;
+ else
+ lprops->flags = 0;
+ lprops->flags |= ubifs_categorize_lprops(c, lprops);
+ }
+ err = check_lpt_crc(buf, c->pnode_sz);
+ return err;
+}
+
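+/*
+ * For reference, the packed pnode layout implied by the code above and by
+ * do_calc_lpt_geom(): a 16-bit CRC, the node type, an optional pnode number
+ * (big model only), and then, for each of the UBIFS_LPT_FANOUT slots, the
+ * free and dirty space in 8-byte units followed by a single index-LEB flag
+ * bit.
+ */
+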
+/**
+ * ubifs_unpack_nnode - unpack a nnode.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing packed nnode to unpack
+ * @nnode: nnode structure to fill
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
+ struct ubifs_nnode *nnode)
+{
+ uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
+ int i, pos = 0, err;
+
+ err = check_lpt_type(&addr, &pos, UBIFS_LPT_NNODE);
+ if (err)
+ return err;
+ if (c->big_lpt)
+ nnode->num = ubifs_unpack_bits(&addr, &pos, c->pcnt_bits);
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ int lnum;
+
+ lnum = ubifs_unpack_bits(&addr, &pos, c->lpt_lnum_bits) +
+ c->lpt_first;
+ if (lnum == c->lpt_last + 1)
+ lnum = 0;
+ nnode->nbranch[i].lnum = lnum;
+ nnode->nbranch[i].offs = ubifs_unpack_bits(&addr, &pos,
+ c->lpt_offs_bits);
+ }
+ err = check_lpt_crc(buf, c->nnode_sz);
+ return err;
+}
+
+/**
+ * unpack_ltab - unpack the LPT's own lprops table.
+ * @c: UBIFS file-system description object
+ * @buf: buffer from which to unpack
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int unpack_ltab(const struct ubifs_info *c, void *buf)
+{
+ uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES;
+ int i, pos = 0, err;
+
+ err = check_lpt_type(&addr, &pos, UBIFS_LPT_LTAB);
+ if (err)
+ return err;
+ for (i = 0; i < c->lpt_lebs; i++) {
+ int free = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
+ int dirty = ubifs_unpack_bits(&addr, &pos, c->lpt_spc_bits);
+
+ if (free < 0 || free > c->leb_size || dirty < 0 ||
+ dirty > c->leb_size || free + dirty > c->leb_size)
+ return -EINVAL;
+
+ c->ltab[i].free = free;
+ c->ltab[i].dirty = dirty;
+ c->ltab[i].tgc = 0;
+ c->ltab[i].cmt = 0;
+ }
+ err = check_lpt_crc(buf, c->ltab_sz);
+ return err;
+}
+
+/**
+ * validate_nnode - validate a nnode.
+ * @c: UBIFS file-system description object
+ * @nnode: nnode to validate
+ * @parent: parent nnode (or NULL for the root nnode)
+ * @iip: index in parent
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode,
+ struct ubifs_nnode *parent, int iip)
+{
+ int i, lvl, max_offs;
+
+ if (c->big_lpt) {
+ int num = calc_nnode_num_from_parent(c, parent, iip);
+
+ if (nnode->num != num)
+ return -EINVAL;
+ }
+ lvl = parent ? parent->level - 1 : c->lpt_hght;
+ if (lvl < 1)
+ return -EINVAL;
+ if (lvl == 1)
+ max_offs = c->leb_size - c->pnode_sz;
+ else
+ max_offs = c->leb_size - c->nnode_sz;
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ int lnum = nnode->nbranch[i].lnum;
+ int offs = nnode->nbranch[i].offs;
+
+ if (lnum == 0) {
+ if (offs != 0)
+ return -EINVAL;
+ continue;
+ }
+ if (lnum < c->lpt_first || lnum > c->lpt_last)
+ return -EINVAL;
+ if (offs < 0 || offs > max_offs)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * validate_pnode - validate a pnode.
+ * @c: UBIFS file-system description object
+ * @pnode: pnode to validate
+ * @parent: parent nnode
+ * @iip: index in parent
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode,
+ struct ubifs_nnode *parent, int iip)
+{
+ int i;
+
+ if (c->big_lpt) {
+ int num = calc_pnode_num_from_parent(c, parent, iip);
+
+ if (pnode->num != num)
+ return -EINVAL;
+ }
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ int free = pnode->lprops[i].free;
+ int dirty = pnode->lprops[i].dirty;
+
+ if (free < 0 || free > c->leb_size || free % c->min_io_size ||
+ (free & 7))
+ return -EINVAL;
+ if (dirty < 0 || dirty > c->leb_size || (dirty & 7))
+ return -EINVAL;
+ if (dirty + free > c->leb_size)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * set_pnode_lnum - set LEB numbers on a pnode.
+ * @c: UBIFS file-system description object
+ * @pnode: pnode to update
+ *
+ * This function calculates the LEB numbers for the LEB properties contained in
+ * @pnode, based on the pnode number.
+ */
+static void set_pnode_lnum(const struct ubifs_info *c,
+ struct ubifs_pnode *pnode)
+{
+ int i, lnum;
+
+ lnum = (pnode->num << UBIFS_LPT_FANOUT_SHIFT) + c->main_first;
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ if (lnum >= c->leb_cnt)
+ return;
+ pnode->lprops[i].lnum = lnum++;
+ }
+}
+
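+/*
+ * Continuing the illustrative example above (fanout 4): a pnode with
+ * num == 9 describes LEBs main_first + 36 to main_first + 39, because lnum
+ * starts at (9 << UBIFS_LPT_FANOUT_SHIFT) + main_first.
+ */
+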
+/**
+ * ubifs_read_nnode - read a nnode from flash and link it to the tree in memory.
+ * @c: UBIFS file-system description object
+ * @parent: parent nnode (or NULL for the root)
+ * @iip: index in parent
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
+{
+ struct ubifs_nbranch *branch = NULL;
+ struct ubifs_nnode *nnode = NULL;
+ void *buf = c->lpt_nod_buf;
+ int err, lnum, offs;
+
+ if (parent) {
+ branch = &parent->nbranch[iip];
+ lnum = branch->lnum;
+ offs = branch->offs;
+ } else {
+ lnum = c->lpt_lnum;
+ offs = c->lpt_offs;
+ }
+ nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_NOFS);
+ if (!nnode) {
+ err = -ENOMEM;
+ goto out;
+ }
+ if (lnum == 0) {
+ /*
+ * This nnode was not written which just means that the LEB
+ * properties in the subtree below it describe empty LEBs. We
+ * make the nnode as though we had read it, which in fact means
+ * doing almost nothing.
+ */
+ if (c->big_lpt)
+ nnode->num = calc_nnode_num_from_parent(c, parent, iip);
+ } else {
+ err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz);
+ if (err)
+ goto out;
+ err = ubifs_unpack_nnode(c, buf, nnode);
+ if (err)
+ goto out;
+ }
+ err = validate_nnode(c, nnode, parent, iip);
+ if (err)
+ goto out;
+ if (!c->big_lpt)
+ nnode->num = calc_nnode_num_from_parent(c, parent, iip);
+ if (parent) {
+ branch->nnode = nnode;
+ nnode->level = parent->level - 1;
+ } else {
+ c->nroot = nnode;
+ nnode->level = c->lpt_hght;
+ }
+ nnode->parent = parent;
+ nnode->iip = iip;
+ return 0;
+
+out:
+ ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
+ kfree(nnode);
+ return err;
+}
+
+/**
+ * read_pnode - read a pnode from flash and link it to the tree in memory.
+ * @c: UBIFS file-system description object
+ * @parent: parent nnode
+ * @iip: index in parent
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
+{
+ struct ubifs_nbranch *branch;
+ struct ubifs_pnode *pnode = NULL;
+ void *buf = c->lpt_nod_buf;
+ int err, lnum, offs;
+
+ branch = &parent->nbranch[iip];
+ lnum = branch->lnum;
+ offs = branch->offs;
+ pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
+ if (!pnode) {
+ err = -ENOMEM;
+ goto out;
+ }
+ if (lnum == 0) {
+ /*
+ * This pnode was not written which just means that the LEB
+ * properties in it describe empty LEBs. We make the pnode as
+ * though we had read it.
+ */
+ int i;
+
+ if (c->big_lpt)
+ pnode->num = calc_pnode_num_from_parent(c, parent, iip);
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ struct ubifs_lprops * const lprops = &pnode->lprops[i];
+
+ lprops->free = c->leb_size;
+ lprops->flags = ubifs_categorize_lprops(c, lprops);
+ }
+ } else {
+ err = ubi_read(c->ubi, lnum, buf, offs, c->pnode_sz);
+ if (err)
+ goto out;
+ err = unpack_pnode(c, buf, pnode);
+ if (err)
+ goto out;
+ }
+ err = validate_pnode(c, pnode, parent, iip);
+ if (err)
+ goto out;
+ if (!c->big_lpt)
+ pnode->num = calc_pnode_num_from_parent(c, parent, iip);
+ branch->pnode = pnode;
+ pnode->parent = parent;
+ pnode->iip = iip;
+ set_pnode_lnum(c, pnode);
+ c->pnodes_have += 1;
+ return 0;
+
+out:
+ ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
+ dbg_dump_pnode(c, pnode, parent, iip);
+ dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
+ kfree(pnode);
+ return err;
+}
+
+/**
+ * read_ltab - read LPT's own lprops table.
+ * @c: UBIFS file-system description object
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int read_ltab(struct ubifs_info *c)
+{
+ int err;
+ void *buf;
+
+ buf = vmalloc(c->ltab_sz);
+ if (!buf)
+ return -ENOMEM;
+ err = ubi_read(c->ubi, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz);
+ if (err)
+ goto out;
+ err = unpack_ltab(c, buf);
+out:
+ vfree(buf);
+ return err;
+}
+
+/**
+ * ubifs_get_nnode - get a nnode.
+ * @c: UBIFS file-system description object
+ * @parent: parent nnode (or NULL for the root)
+ * @iip: index in parent
+ *
+ * This function returns a pointer to the nnode on success or a negative error
+ * code on failure.
+ */
+struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c,
+ struct ubifs_nnode *parent, int iip)
+{
+ struct ubifs_nbranch *branch;
+ struct ubifs_nnode *nnode;
+ int err;
+
+ branch = &parent->nbranch[iip];
+ nnode = branch->nnode;
+ if (nnode)
+ return nnode;
+ err = ubifs_read_nnode(c, parent, iip);
+ if (err)
+ return ERR_PTR(err);
+ return branch->nnode;
+}
+
+/**
+ * ubifs_get_pnode - get a pnode.
+ * @c: UBIFS file-system description object
+ * @parent: parent nnode
+ * @iip: index in parent
+ *
+ * This function returns a pointer to the pnode on success or a negative error
+ * code on failure.
+ */
+struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c,
+ struct ubifs_nnode *parent, int iip)
+{
+ struct ubifs_nbranch *branch;
+ struct ubifs_pnode *pnode;
+ int err;
+
+ branch = &parent->nbranch[iip];
+ pnode = branch->pnode;
+ if (pnode)
+ return pnode;
+ err = read_pnode(c, parent, iip);
+ if (err)
+ return ERR_PTR(err);
+ update_cats(c, branch->pnode);
+ return branch->pnode;
+}
+
+/**
+ * ubifs_lpt_lookup - lookup LEB properties in the LPT.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to lookup
+ *
+ * This function returns a pointer to the LEB properties on success or a
+ * negative error code on failure.
+ */
+struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
+{
+ int err, i, h, iip, shft;
+ struct ubifs_nnode *nnode;
+ struct ubifs_pnode *pnode;
+
+ if (!c->nroot) {
+ err = ubifs_read_nnode(c, NULL, 0);
+ if (err)
+ return ERR_PTR(err);
+ }
+ nnode = c->nroot;
+ i = lnum - c->main_first;
+ shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
+ for (h = 1; h < c->lpt_hght; h++) {
+ iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
+ shft -= UBIFS_LPT_FANOUT_SHIFT;
+ nnode = ubifs_get_nnode(c, nnode, iip);
+ if (IS_ERR(nnode))
+ return ERR_PTR(PTR_ERR(nnode));
+ }
+ iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
+ shft -= UBIFS_LPT_FANOUT_SHIFT;
+ pnode = ubifs_get_pnode(c, nnode, iip);
+ if (IS_ERR(pnode))
+ return ERR_PTR(PTR_ERR(pnode));
+ iip = (i & (UBIFS_LPT_FANOUT - 1));
+ dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
+ pnode->lprops[iip].free, pnode->lprops[iip].dirty,
+ pnode->lprops[iip].flags);
+ return &pnode->lprops[iip];
+}
+
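+/*
+ * A minimal usage sketch for 'ubifs_lpt_lookup()' (hypothetical helper, not
+ * part of the original code): read the free space of one LEB under the
+ * lprops mutex. This is essentially what 'ubifs_read_one_lp()' in lprops.c
+ * does.
+ */
+static int example_leb_free_space(struct ubifs_info *c, int lnum)
+{
+ struct ubifs_lprops *lprops;
+ int free;
+
+ ubifs_get_lprops(c);
+ lprops = ubifs_lpt_lookup(c, lnum);
+ if (IS_ERR(lprops)) {
+ ubifs_release_lprops(c);
+ return PTR_ERR(lprops);
+ }
+ free = lprops->free;
+ ubifs_release_lprops(c);
+ return free;
+}
+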
+/**
+ * dirty_cow_nnode - ensure a nnode is not being committed.
+ * @c: UBIFS file-system description object
+ * @nnode: nnode to check
+ *
+ * Returns dirtied nnode on success or negative error code on failure.
+ */
+static struct ubifs_nnode *dirty_cow_nnode(struct ubifs_info *c,
+ struct ubifs_nnode *nnode)
+{
+ struct ubifs_nnode *n;
+ int i;
+
+ if (!test_bit(COW_CNODE, &nnode->flags)) {
+ /* nnode is not being committed */
+ if (!test_and_set_bit(DIRTY_CNODE, &nnode->flags)) {
+ c->dirty_nn_cnt += 1;
+ ubifs_add_nnode_dirt(c, nnode);
+ }
+ return nnode;
+ }
+
+ /* nnode is being committed, so copy it */
+ n = kmalloc(sizeof(struct ubifs_nnode), GFP_NOFS);
+ if (unlikely(!n))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(n, nnode, sizeof(struct ubifs_nnode));
+ n->cnext = NULL;
+ __set_bit(DIRTY_CNODE, &n->flags);
+ __clear_bit(COW_CNODE, &n->flags);
+
+ /* The children now have new parent */
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ struct ubifs_nbranch *branch = &n->nbranch[i];
+
+ if (branch->cnode)
+ branch->cnode->parent = n;
+ }
+
+ ubifs_assert(!test_bit(OBSOLETE_CNODE, &nnode->flags));
+ __set_bit(OBSOLETE_CNODE, &nnode->flags);
+
+ c->dirty_nn_cnt += 1;
+ ubifs_add_nnode_dirt(c, nnode);
+ if (nnode->parent)
+ nnode->parent->nbranch[n->iip].nnode = n;
+ else
+ c->nroot = n;
+ return n;
+}
+
+/**
+ * dirty_cow_pnode - ensure a pnode is not being committed.
+ * @c: UBIFS file-system description object
+ * @pnode: pnode to check
+ *
+ * Returns dirtied pnode on success or negative error code on failure.
+ */
+static struct ubifs_pnode *dirty_cow_pnode(struct ubifs_info *c,
+ struct ubifs_pnode *pnode)
+{
+ struct ubifs_pnode *p;
+
+ if (!test_bit(COW_CNODE, &pnode->flags)) {
+ /* pnode is not being committed */
+ if (!test_and_set_bit(DIRTY_CNODE, &pnode->flags)) {
+ c->dirty_pn_cnt += 1;
+ add_pnode_dirt(c, pnode);
+ }
+ return pnode;
+ }
+
+ /* pnode is being committed, so copy it */
+ p = kmalloc(sizeof(struct ubifs_pnode), GFP_NOFS);
+ if (unlikely(!p))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(p, pnode, sizeof(struct ubifs_pnode));
+ p->cnext = NULL;
+ __set_bit(DIRTY_CNODE, &p->flags);
+ __clear_bit(COW_CNODE, &p->flags);
+ replace_cats(c, pnode, p);
+
+ ubifs_assert(!test_bit(OBSOLETE_CNODE, &pnode->flags));
+ __set_bit(OBSOLETE_CNODE, &pnode->flags);
+
+ c->dirty_pn_cnt += 1;
+ add_pnode_dirt(c, pnode);
+ pnode->parent->nbranch[p->iip].pnode = p;
+ return p;
+}
+
+/**
+ * ubifs_lpt_lookup_dirty - lookup LEB properties in the LPT.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to lookup
+ *
+ * This function returns a pointer to the LEB properties on success or a
+ * negative error code on failure.
+ */
+struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
+{
+ int err, i, h, iip, shft;
+ struct ubifs_nnode *nnode;
+ struct ubifs_pnode *pnode;
+
+ if (!c->nroot) {
+ err = ubifs_read_nnode(c, NULL, 0);
+ if (err)
+ return ERR_PTR(err);
+ }
+ nnode = c->nroot;
+ nnode = dirty_cow_nnode(c, nnode);
+ if (IS_ERR(nnode))
+ return ERR_PTR(PTR_ERR(nnode));
+ i = lnum - c->main_first;
+ shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
+ for (h = 1; h < c->lpt_hght; h++) {
+ iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
+ shft -= UBIFS_LPT_FANOUT_SHIFT;
+ nnode = ubifs_get_nnode(c, nnode, iip);
+ if (IS_ERR(nnode))
+ return ERR_PTR(PTR_ERR(nnode));
+ nnode = dirty_cow_nnode(c, nnode);
+ if (IS_ERR(nnode))
+ return ERR_PTR(PTR_ERR(nnode));
+ }
+ iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
+ shft -= UBIFS_LPT_FANOUT_SHIFT;
+ pnode = ubifs_get_pnode(c, nnode, iip);
+ if (IS_ERR(pnode))
+ return ERR_PTR(PTR_ERR(pnode));
+ pnode = dirty_cow_pnode(c, pnode);
+ if (IS_ERR(pnode))
+ return ERR_PTR(PTR_ERR(pnode));
+ iip = (i & (UBIFS_LPT_FANOUT - 1));
+ dbg_lp("LEB %d, free %d, dirty %d, flags %d", lnum,
+ pnode->lprops[iip].free, pnode->lprops[iip].dirty,
+ pnode->lprops[iip].flags);
+ ubifs_assert(test_bit(DIRTY_CNODE, &pnode->flags));
+ return &pnode->lprops[iip];
+}
+
+/**
+ * lpt_init_rd - initialize the LPT for reading.
+ * @c: UBIFS file-system description object
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int lpt_init_rd(struct ubifs_info *c)
+{
+ int err, i;
+
+ c->ltab = vmalloc(sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
+ if (!c->ltab)
+ return -ENOMEM;
+
+ i = max_t(int, c->nnode_sz, c->pnode_sz);
+ c->lpt_nod_buf = kmalloc(i, GFP_KERNEL);
+ if (!c->lpt_nod_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < LPROPS_HEAP_CNT; i++) {
+ c->lpt_heap[i].arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ,
+ GFP_KERNEL);
+ if (!c->lpt_heap[i].arr)
+ return -ENOMEM;
+ c->lpt_heap[i].cnt = 0;
+ c->lpt_heap[i].max_cnt = LPT_HEAP_SZ;
+ }
+
+ c->dirty_idx.arr = kmalloc(sizeof(void *) * LPT_HEAP_SZ, GFP_KERNEL);
+ if (!c->dirty_idx.arr)
+ return -ENOMEM;
+ c->dirty_idx.cnt = 0;
+ c->dirty_idx.max_cnt = LPT_HEAP_SZ;
+
+ err = read_ltab(c);
+ if (err)
+ return err;
+
+ dbg_lp("space_bits %d", c->space_bits);
+ dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits);
+ dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits);
+ dbg_lp("lpt_spc_bits %d", c->lpt_spc_bits);
+ dbg_lp("pcnt_bits %d", c->pcnt_bits);
+ dbg_lp("lnum_bits %d", c->lnum_bits);
+ dbg_lp("pnode_sz %d", c->pnode_sz);
+ dbg_lp("nnode_sz %d", c->nnode_sz);
+ dbg_lp("ltab_sz %d", c->ltab_sz);
+ dbg_lp("lsave_sz %d", c->lsave_sz);
+ dbg_lp("lsave_cnt %d", c->lsave_cnt);
+ dbg_lp("lpt_hght %d", c->lpt_hght);
+ dbg_lp("big_lpt %d", c->big_lpt);
+ dbg_lp("LPT root is at %d:%d", c->lpt_lnum, c->lpt_offs);
+ dbg_lp("LPT head is at %d:%d", c->nhead_lnum, c->nhead_offs);
+ dbg_lp("LPT ltab is at %d:%d", c->ltab_lnum, c->ltab_offs);
+ if (c->big_lpt)
+ dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
+
+ return 0;
+}
+
+/**
+ * ubifs_lpt_init - initialize the LPT.
+ * @c: UBIFS file-system description object
+ * @rd: whether to initialize lpt for reading
+ * @wr: whether to initialize lpt for writing
+ *
+ * For mounting 'rw', @rd and @wr are both true. For mounting 'ro', @rd is true
+ * and @wr is false. For remounting from 'ro' to 'rw', @rd is false and @wr is
+ * true.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
+{
+ int err;
+
+ if (rd) {
+ err = lpt_init_rd(c);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
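+
+/*
+ * Usage note (illustrative): a read-only mount would call
+ * ubifs_lpt_init(c, 1, 0). In this U-Boot port only the read side is
+ * implemented, so the @wr path above is a no-op.
+ */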
diff --git a/qemu/roms/u-boot/fs/ubifs/lpt_commit.c b/qemu/roms/u-boot/fs/ubifs/lpt_commit.c
new file mode 100644
index 000000000..c0af8187a
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/lpt_commit.c
@@ -0,0 +1,171 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements commit-related functionality of the LEB properties
+ * subsystem.
+ */
+
+#include "crc16.h"
+#include "ubifs.h"
+
+/**
+ * free_obsolete_cnodes - free obsolete cnodes for commit end.
+ * @c: UBIFS file-system description object
+ */
+static void free_obsolete_cnodes(struct ubifs_info *c)
+{
+ struct ubifs_cnode *cnode, *cnext;
+
+ cnext = c->lpt_cnext;
+ if (!cnext)
+ return;
+ do {
+ cnode = cnext;
+ cnext = cnode->cnext;
+ if (test_bit(OBSOLETE_CNODE, &cnode->flags))
+ kfree(cnode);
+ else
+ cnode->cnext = NULL;
+ } while (cnext != c->lpt_cnext);
+ c->lpt_cnext = NULL;
+}
+
+/**
+ * first_nnode - find the first nnode in memory.
+ * @c: UBIFS file-system description object
+ * @hght: the height in the tree at which the nnode was found is returned here
+ *
+ * This function returns a pointer to the nnode found or %NULL if no nnode is
+ * found. This function is a helper to 'ubifs_lpt_free()'.
+ */
+static struct ubifs_nnode *first_nnode(struct ubifs_info *c, int *hght)
+{
+ struct ubifs_nnode *nnode;
+ int h, i, found;
+
+ nnode = c->nroot;
+ *hght = 0;
+ if (!nnode)
+ return NULL;
+ for (h = 1; h < c->lpt_hght; h++) {
+ found = 0;
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ if (nnode->nbranch[i].nnode) {
+ found = 1;
+ nnode = nnode->nbranch[i].nnode;
+ *hght = h;
+ break;
+ }
+ }
+ if (!found)
+ break;
+ }
+ return nnode;
+}
+
+/**
+ * next_nnode - find the next nnode in memory.
+ * @c: UBIFS file-system description object
+ * @nnode: nnode from which to start.
+ * @hght: the height in the tree at which @nnode is; passed and returned here
+ *
+ * This function returns a pointer to the nnode found or %NULL if no nnode is
+ * found. This function is a helper to 'ubifs_lpt_free()'.
+ */
+static struct ubifs_nnode *next_nnode(struct ubifs_info *c,
+ struct ubifs_nnode *nnode, int *hght)
+{
+ struct ubifs_nnode *parent;
+ int iip, h, i, found;
+
+ parent = nnode->parent;
+ if (!parent)
+ return NULL;
+ if (nnode->iip == UBIFS_LPT_FANOUT - 1) {
+ *hght -= 1;
+ return parent;
+ }
+ for (iip = nnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
+ nnode = parent->nbranch[iip].nnode;
+ if (nnode)
+ break;
+ }
+ if (!nnode) {
+ *hght -= 1;
+ return parent;
+ }
+ for (h = *hght + 1; h < c->lpt_hght; h++) {
+ found = 0;
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+ if (nnode->nbranch[i].nnode) {
+ found = 1;
+ nnode = nnode->nbranch[i].nnode;
+ *hght = h;
+ break;
+ }
+ }
+ if (!found)
+ break;
+ }
+ return nnode;
+}
+
+/**
+ * ubifs_lpt_free - free resources owned by the LPT.
+ * @c: UBIFS file-system description object
+ * @wr_only: free only resources used for writing
+ */
+void ubifs_lpt_free(struct ubifs_info *c, int wr_only)
+{
+ struct ubifs_nnode *nnode;
+ int i, hght;
+
+ /* Free write-only things first */
+
+ free_obsolete_cnodes(c); /* Leftover from a failed commit */
+
+ vfree(c->ltab_cmt);
+ c->ltab_cmt = NULL;
+ vfree(c->lpt_buf);
+ c->lpt_buf = NULL;
+ kfree(c->lsave);
+ c->lsave = NULL;
+
+ if (wr_only)
+ return;
+
+ /* Now free the rest */
+
+ nnode = first_nnode(c, &hght);
+ while (nnode) {
+ for (i = 0; i < UBIFS_LPT_FANOUT; i++)
+ kfree(nnode->nbranch[i].nnode);
+ nnode = next_nnode(c, nnode, &hght);
+ }
+ for (i = 0; i < LPROPS_HEAP_CNT; i++)
+ kfree(c->lpt_heap[i].arr);
+ kfree(c->dirty_idx.arr);
+ kfree(c->nroot);
+ vfree(c->ltab);
+ kfree(c->lpt_nod_buf);
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/master.c b/qemu/roms/u-boot/fs/ubifs/master.c
new file mode 100644
index 000000000..3f2926e87
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/master.c
@@ -0,0 +1,341 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/* This file implements reading and writing the master node */
+
+#include "ubifs.h"
+
+/**
+ * scan_for_master - search the valid master node.
+ * @c: UBIFS file-system description object
+ *
+ * This function scans the master node LEBs and searches for the latest master
+ * node. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int scan_for_master(struct ubifs_info *c)
+{
+ struct ubifs_scan_leb *sleb;
+ struct ubifs_scan_node *snod;
+ int lnum, offs = 0, nodes_cnt;
+
+ lnum = UBIFS_MST_LNUM;
+
+ sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+ if (IS_ERR(sleb))
+ return PTR_ERR(sleb);
+ nodes_cnt = sleb->nodes_cnt;
+ if (nodes_cnt > 0) {
+ snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+ list);
+ if (snod->type != UBIFS_MST_NODE)
+ goto out;
+ memcpy(c->mst_node, snod->node, snod->len);
+ offs = snod->offs;
+ }
+ ubifs_scan_destroy(sleb);
+
+ lnum += 1;
+
+ sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+ if (IS_ERR(sleb))
+ return PTR_ERR(sleb);
+ if (sleb->nodes_cnt != nodes_cnt)
+ goto out;
+ if (!sleb->nodes_cnt)
+ goto out;
+ snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list);
+ if (snod->type != UBIFS_MST_NODE)
+ goto out;
+ if (snod->offs != offs)
+ goto out;
+ if (memcmp((void *)c->mst_node + UBIFS_CH_SZ,
+ (void *)snod->node + UBIFS_CH_SZ,
+ UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
+ goto out;
+ c->mst_offs = offs;
+ ubifs_scan_destroy(sleb);
+ return 0;
+
+out:
+ ubifs_scan_destroy(sleb);
+ return -EINVAL;
+}
+
+/**
+ * validate_master - validate master node.
+ * @c: UBIFS file-system description object
+ *
+ * This function validates data which was read from master node. Returns zero
+ * if the data is all right and %-EINVAL if not.
+ */
+static int validate_master(const struct ubifs_info *c)
+{
+ long long main_sz;
+ int err;
+
+ if (c->max_sqnum >= SQNUM_WATERMARK) {
+ err = 1;
+ goto out;
+ }
+
+ if (c->cmt_no >= c->max_sqnum) {
+ err = 2;
+ goto out;
+ }
+
+ if (c->highest_inum >= INUM_WATERMARK) {
+ err = 3;
+ goto out;
+ }
+
+ if (c->lhead_lnum < UBIFS_LOG_LNUM ||
+ c->lhead_lnum >= UBIFS_LOG_LNUM + c->log_lebs ||
+ c->lhead_offs < 0 || c->lhead_offs >= c->leb_size ||
+ c->lhead_offs & (c->min_io_size - 1)) {
+ err = 4;
+ goto out;
+ }
+
+ if (c->zroot.lnum >= c->leb_cnt || c->zroot.lnum < c->main_first ||
+ c->zroot.offs >= c->leb_size || c->zroot.offs & 7) {
+ err = 5;
+ goto out;
+ }
+
+ if (c->zroot.len < c->ranges[UBIFS_IDX_NODE].min_len ||
+ c->zroot.len > c->ranges[UBIFS_IDX_NODE].max_len) {
+ err = 6;
+ goto out;
+ }
+
+ if (c->gc_lnum >= c->leb_cnt || c->gc_lnum < c->main_first) {
+ err = 7;
+ goto out;
+ }
+
+ if (c->ihead_lnum >= c->leb_cnt || c->ihead_lnum < c->main_first ||
+ c->ihead_offs % c->min_io_size || c->ihead_offs < 0 ||
+ c->ihead_offs > c->leb_size || c->ihead_offs & 7) {
+ err = 8;
+ goto out;
+ }
+
+ main_sz = (long long)c->main_lebs * c->leb_size;
+ if (c->old_idx_sz & 7 || c->old_idx_sz >= main_sz) {
+ err = 9;
+ goto out;
+ }
+
+ if (c->lpt_lnum < c->lpt_first || c->lpt_lnum > c->lpt_last ||
+ c->lpt_offs < 0 || c->lpt_offs + c->nnode_sz > c->leb_size) {
+ err = 10;
+ goto out;
+ }
+
+ if (c->nhead_lnum < c->lpt_first || c->nhead_lnum > c->lpt_last ||
+ c->nhead_offs < 0 || c->nhead_offs % c->min_io_size ||
+ c->nhead_offs > c->leb_size) {
+ err = 11;
+ goto out;
+ }
+
+ if (c->ltab_lnum < c->lpt_first || c->ltab_lnum > c->lpt_last ||
+ c->ltab_offs < 0 ||
+ c->ltab_offs + c->ltab_sz > c->leb_size) {
+ err = 12;
+ goto out;
+ }
+
+ if (c->big_lpt && (c->lsave_lnum < c->lpt_first ||
+ c->lsave_lnum > c->lpt_last || c->lsave_offs < 0 ||
+ c->lsave_offs + c->lsave_sz > c->leb_size)) {
+ err = 13;
+ goto out;
+ }
+
+ if (c->lscan_lnum < c->main_first || c->lscan_lnum >= c->leb_cnt) {
+ err = 14;
+ goto out;
+ }
+
+ if (c->lst.empty_lebs < 0 || c->lst.empty_lebs > c->main_lebs - 2) {
+ err = 15;
+ goto out;
+ }
+
+ if (c->lst.idx_lebs < 0 || c->lst.idx_lebs > c->main_lebs - 1) {
+ err = 16;
+ goto out;
+ }
+
+ if (c->lst.total_free < 0 || c->lst.total_free > main_sz ||
+ c->lst.total_free & 7) {
+ err = 17;
+ goto out;
+ }
+
+ if (c->lst.total_dirty < 0 || (c->lst.total_dirty & 7)) {
+ err = 18;
+ goto out;
+ }
+
+ if (c->lst.total_used < 0 || (c->lst.total_used & 7)) {
+ err = 19;
+ goto out;
+ }
+
+ if (c->lst.total_free + c->lst.total_dirty +
+ c->lst.total_used > main_sz) {
+ err = 20;
+ goto out;
+ }
+
+ if (c->lst.total_dead + c->lst.total_dark +
+ c->lst.total_used + c->old_idx_sz > main_sz) {
+ err = 21;
+ goto out;
+ }
+
+ if (c->lst.total_dead < 0 ||
+ c->lst.total_dead > c->lst.total_free + c->lst.total_dirty ||
+ c->lst.total_dead & 7) {
+ err = 22;
+ goto out;
+ }
+
+ if (c->lst.total_dark < 0 ||
+ c->lst.total_dark > c->lst.total_free + c->lst.total_dirty ||
+ c->lst.total_dark & 7) {
+ err = 23;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ ubifs_err("bad master node at offset %d error %d", c->mst_offs, err);
+ dbg_dump_node(c, c->mst_node);
+ return -EINVAL;
+}
+
+/**
+ * ubifs_read_master - read master node.
+ * @c: UBIFS file-system description object
+ *
+ * This function finds and reads the master node during file-system mount. If
+ * the flash is empty, it creates a default master node as well. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubifs_read_master(struct ubifs_info *c)
+{
+ int err, old_leb_cnt;
+
+ c->mst_node = kzalloc(c->mst_node_alsz, GFP_KERNEL);
+ if (!c->mst_node)
+ return -ENOMEM;
+
+ err = scan_for_master(c);
+ if (err) {
+ err = ubifs_recover_master_node(c);
+ if (err)
+ /*
+ * Note, we do not free 'c->mst_node' here because the
+ * unmount routine will take care of this.
+ */
+ return err;
+ }
+
+ /* Make sure that the recovery flag is clear */
+ c->mst_node->flags &= cpu_to_le32(~UBIFS_MST_RCVRY);
+
+ c->max_sqnum = le64_to_cpu(c->mst_node->ch.sqnum);
+ c->highest_inum = le64_to_cpu(c->mst_node->highest_inum);
+ c->cmt_no = le64_to_cpu(c->mst_node->cmt_no);
+ c->zroot.lnum = le32_to_cpu(c->mst_node->root_lnum);
+ c->zroot.offs = le32_to_cpu(c->mst_node->root_offs);
+ c->zroot.len = le32_to_cpu(c->mst_node->root_len);
+ c->lhead_lnum = le32_to_cpu(c->mst_node->log_lnum);
+ c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum);
+ c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum);
+ c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs);
+ c->old_idx_sz = le64_to_cpu(c->mst_node->index_size);
+ c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum);
+ c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs);
+ c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum);
+ c->nhead_offs = le32_to_cpu(c->mst_node->nhead_offs);
+ c->ltab_lnum = le32_to_cpu(c->mst_node->ltab_lnum);
+ c->ltab_offs = le32_to_cpu(c->mst_node->ltab_offs);
+ c->lsave_lnum = le32_to_cpu(c->mst_node->lsave_lnum);
+ c->lsave_offs = le32_to_cpu(c->mst_node->lsave_offs);
+ c->lscan_lnum = le32_to_cpu(c->mst_node->lscan_lnum);
+ c->lst.empty_lebs = le32_to_cpu(c->mst_node->empty_lebs);
+ c->lst.idx_lebs = le32_to_cpu(c->mst_node->idx_lebs);
+ old_leb_cnt = le32_to_cpu(c->mst_node->leb_cnt);
+ c->lst.total_free = le64_to_cpu(c->mst_node->total_free);
+ c->lst.total_dirty = le64_to_cpu(c->mst_node->total_dirty);
+ c->lst.total_used = le64_to_cpu(c->mst_node->total_used);
+ c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead);
+ c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark);
+
+ c->calc_idx_sz = c->old_idx_sz;
+
+ if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS))
+ c->no_orphs = 1;
+
+ if (old_leb_cnt != c->leb_cnt) {
+ /* The file system has been resized */
+ int growth = c->leb_cnt - old_leb_cnt;
+
+ if (c->leb_cnt < old_leb_cnt ||
+ c->leb_cnt < UBIFS_MIN_LEB_CNT) {
+ ubifs_err("bad leb_cnt on master node");
+ dbg_dump_node(c, c->mst_node);
+ return -EINVAL;
+ }
+
+ dbg_mnt("Auto resizing (master) from %d LEBs to %d LEBs",
+ old_leb_cnt, c->leb_cnt);
+ c->lst.empty_lebs += growth;
+ c->lst.total_free += growth * (long long)c->leb_size;
+ c->lst.total_dark += growth * (long long)c->dark_wm;
+
+ /*
+ * Reflect changes back onto the master node. N.B. the master
+ * node gets written immediately whenever mounting (or
+ * remounting) in read-write mode, so we do not need to write it
+ * here.
+ */
+ c->mst_node->leb_cnt = cpu_to_le32(c->leb_cnt);
+ c->mst_node->empty_lebs = cpu_to_le32(c->lst.empty_lebs);
+ c->mst_node->total_free = cpu_to_le64(c->lst.total_free);
+ c->mst_node->total_dark = cpu_to_le64(c->lst.total_dark);
+ }
+
+ err = validate_master(c);
+ if (err)
+ return err;
+
+ err = dbg_old_index_check_init(c, &c->zroot);
+
+ return err;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/misc.h b/qemu/roms/u-boot/fs/ubifs/misc.h
new file mode 100644
index 000000000..609232e93
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/misc.h
@@ -0,0 +1,311 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This file contains miscellaneous helper functions.
+ */
+
+#ifndef __UBIFS_MISC_H__
+#define __UBIFS_MISC_H__
+
+/**
+ * ubifs_zn_dirty - check if znode is dirty.
+ * @znode: znode to check
+ *
+ * This helper function returns %1 if @znode is dirty and %0 otherwise.
+ */
+static inline int ubifs_zn_dirty(const struct ubifs_znode *znode)
+{
+ return !!test_bit(DIRTY_ZNODE, &znode->flags);
+}
+
+/**
+ * ubifs_wake_up_bgt - wake up background thread.
+ * @c: UBIFS file-system description object
+ */
+static inline void ubifs_wake_up_bgt(struct ubifs_info *c)
+{
+ if (c->bgt && !c->need_bgt) {
+ c->need_bgt = 1;
+ wake_up_process(c->bgt);
+ }
+}
+
+/**
+ * ubifs_tnc_find_child - find next child in znode.
+ * @znode: znode to search at
+ * @start: the zbranch index to start at
+ *
+ * This helper function looks for a znode child starting at index @start.
+ * Returns the child or %NULL if no children were found.
+ */
+static inline struct ubifs_znode *
+ubifs_tnc_find_child(struct ubifs_znode *znode, int start)
+{
+ while (start < znode->child_cnt) {
+ if (znode->zbranch[start].znode)
+ return znode->zbranch[start].znode;
+ start += 1;
+ }
+
+ return NULL;
+}
+
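+/**
+ * example_count_children - count in-memory children of a znode.
+ * @znode: znode to scan
+ *
+ * A minimal usage sketch for 'ubifs_tnc_find_child()' (hypothetical helper,
+ * not part of the original code): restart the search one position past the
+ * child found on the previous iteration.
+ */
+static inline int example_count_children(struct ubifs_znode *znode)
+{
+ struct ubifs_znode *child;
+ int i = 0, cnt = 0;
+
+ while ((child = ubifs_tnc_find_child(znode, i)) != NULL) {
+ cnt += 1;
+ i = child->iip + 1;
+ }
+ return cnt;
+}
+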
+/**
+ * ubifs_inode - get UBIFS inode information by VFS 'struct inode' object.
+ * @inode: the VFS 'struct inode' pointer
+ */
+static inline struct ubifs_inode *ubifs_inode(const struct inode *inode)
+{
+ return container_of(inode, struct ubifs_inode, vfs_inode);
+}
+
+/**
+ * ubifs_compr_present - check if compressor was compiled in.
+ * @compr_type: compressor type to check
+ *
+ * This function returns %1 if a compressor of type @compr_type is present, and
+ * %0 if not.
+ */
+static inline int ubifs_compr_present(int compr_type)
+{
+ ubifs_assert(compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT);
+ return !!ubifs_compressors[compr_type]->capi_name;
+}
+
+/**
+ * ubifs_compr_name - get compressor name string by its type.
+ * @compr_type: compressor type
+ *
+ * This function returns the compressor name string.
+ */
+static inline const char *ubifs_compr_name(int compr_type)
+{
+ ubifs_assert(compr_type >= 0 && compr_type < UBIFS_COMPR_TYPES_CNT);
+ return ubifs_compressors[compr_type]->name;
+}
+
+/**
+ * ubifs_wbuf_sync - synchronize write-buffer.
+ * @wbuf: write-buffer to synchronize
+ *
+ * This is the same as 'ubifs_wbuf_sync_nolock()', but it does not assume
+ * that the write-buffer is already locked.
+ */
+static inline int ubifs_wbuf_sync(struct ubifs_wbuf *wbuf)
+{
+ int err;
+
+ mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
+ err = ubifs_wbuf_sync_nolock(wbuf);
+ mutex_unlock(&wbuf->io_mutex);
+ return err;
+}
+
+/**
+ * ubifs_leb_unmap - unmap an LEB.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to unmap
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static inline int ubifs_leb_unmap(const struct ubifs_info *c, int lnum)
+{
+ int err;
+
+ if (c->ro_media)
+ return -EROFS;
+ err = ubi_leb_unmap(c->ubi, lnum);
+ if (err) {
+ ubifs_err("unmap LEB %d failed, error %d", lnum, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubifs_leb_write - write to a LEB.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to write
+ * @buf: buffer to write from
+ * @offs: offset within LEB to write to
+ * @len: length to write
+ * @dtype: data type
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static inline int ubifs_leb_write(const struct ubifs_info *c, int lnum,
+ const void *buf, int offs, int len, int dtype)
+{
+ int err;
+
+ if (c->ro_media)
+ return -EROFS;
+ err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
+ if (err) {
+ ubifs_err("writing %d bytes at %d:%d, error %d",
+ len, lnum, offs, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubifs_leb_change - atomic LEB change.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to write
+ * @buf: buffer to write from
+ * @len: length to write
+ * @dtype: data type
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static inline int ubifs_leb_change(const struct ubifs_info *c, int lnum,
+ const void *buf, int len, int dtype)
+{
+ int err;
+
+ if (c->ro_media)
+ return -EROFS;
+ err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
+ if (err) {
+ ubifs_err("changing %d bytes in LEB %d, error %d",
+ len, lnum, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubifs_add_dirt - add dirty space to LEB properties.
+ * @c: the UBIFS file-system description object
+ * @lnum: LEB to add dirty space for
+ * @dirty: dirty space to add
+ *
+ * This is a helper function which increases the amount of dirty LEB space. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static inline int ubifs_add_dirt(struct ubifs_info *c, int lnum, int dirty)
+{
+ return ubifs_update_one_lp(c, lnum, LPROPS_NC, dirty, 0, 0);
+}
+
+/**
+ * ubifs_return_leb - return LEB to lprops.
+ * @c: the UBIFS file-system description object
+ * @lnum: LEB to return
+ *
+ * This helper function clears the "taken" flag of a logical eraseblock in the
+ * lprops. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static inline int ubifs_return_leb(struct ubifs_info *c, int lnum)
+{
+ return ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
+ LPROPS_TAKEN, 0);
+}
+
+/**
+ * ubifs_idx_node_sz - return index node size.
+ * @c: the UBIFS file-system description object
+ * @child_cnt: number of children of this index node
+ */
+static inline int ubifs_idx_node_sz(const struct ubifs_info *c, int child_cnt)
+{
+ return UBIFS_IDX_NODE_SZ + (UBIFS_BRANCH_SZ + c->key_len) * child_cnt;
+}
+
+/**
+ * ubifs_idx_branch - return pointer to an index branch.
+ * @c: the UBIFS file-system description object
+ * @idx: index node
+ * @bnum: branch number
+ */
+static inline
+struct ubifs_branch *ubifs_idx_branch(const struct ubifs_info *c,
+ const struct ubifs_idx_node *idx,
+ int bnum)
+{
+ return (struct ubifs_branch *)((void *)idx->branches +
+ (UBIFS_BRANCH_SZ + c->key_len) * bnum);
+}
+
+/**
+ * ubifs_idx_key - return pointer to an index key.
+ * @c: the UBIFS file-system description object
+ * @idx: index node
+ */
+static inline void *ubifs_idx_key(const struct ubifs_info *c,
+ const struct ubifs_idx_node *idx)
+{
+ const __u8 *branch = idx->branches;
+ return (void *)((struct ubifs_branch *)branch)->key;
+}
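The two helpers above encode the on-flash index node layout: a fixed header followed by @child_cnt branches, each branch being a fixed part plus one key. The following standalone sketch of that arithmetic uses assumed illustrative sizes (header, branch and key lengths) rather than the real UBIFS constants, so it is for orientation only.

/* Standalone sketch of the index-node layout arithmetic; sizes are assumed. */
#include <stdio.h>

#define IDX_NODE_HDR_SZ 28	/* assumed size of the fixed index-node header */
#define BRANCH_HDR_SZ   12	/* assumed size of the fixed part of a branch  */

static int idx_node_sz(int key_len, int child_cnt)
{
	/* header followed by child_cnt branches, each carrying one key */
	return IDX_NODE_HDR_SZ + (BRANCH_HDR_SZ + key_len) * child_cnt;
}

static int idx_branch_off(int key_len, int bnum)
{
	/* byte offset of branch bnum inside the branches[] array */
	return (BRANCH_HDR_SZ + key_len) * bnum;
}

int main(void)
{
	int key_len = 8;	/* assumed key length */

	printf("index node with 10 children: %d bytes\n",
	       idx_node_sz(key_len, 10));
	printf("branch 3 starts at byte %d of branches[]\n",
	       idx_branch_off(key_len, 3));
	return 0;
}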
+
+/**
+ * ubifs_tnc_lookup - look up a file-system node.
+ * @c: UBIFS file-system description object
+ * @key: node key to lookup
+ * @node: the node is returned here
+ *
+ * This function looks up and reads the node with key @key. The caller has to make
+ * sure the @node buffer is large enough to fit the node. Returns zero in case
+ * of success, %-ENOENT if the node was not found, and a negative error code in
+ * case of failure.
+ */
+static inline int ubifs_tnc_lookup(struct ubifs_info *c,
+ const union ubifs_key *key, void *node)
+{
+ return ubifs_tnc_locate(c, key, node, NULL, NULL);
+}
+
+/**
+ * ubifs_get_lprops - get reference to LEB properties.
+ * @c: the UBIFS file-system description object
+ *
+ * This function locks lprops. Lprops have to be unlocked by
+ * 'ubifs_release_lprops()'.
+ */
+static inline void ubifs_get_lprops(struct ubifs_info *c)
+{
+ mutex_lock(&c->lp_mutex);
+}
+
+/**
+ * ubifs_release_lprops - release lprops lock.
+ * @c: the UBIFS file-system description object
+ *
+ * This function has to be called after each 'ubifs_get_lprops()' call to
+ * unlock lprops.
+ */
+static inline void ubifs_release_lprops(struct ubifs_info *c)
+{
+ ubifs_assert(mutex_is_locked(&c->lp_mutex));
+ ubifs_assert(c->lst.empty_lebs >= 0 &&
+ c->lst.empty_lebs <= c->main_lebs);
+ mutex_unlock(&c->lp_mutex);
+}
+
+#endif /* __UBIFS_MISC_H__ */
diff --git a/qemu/roms/u-boot/fs/ubifs/orphan.c b/qemu/roms/u-boot/fs/ubifs/orphan.c
new file mode 100644
index 000000000..d091031b8
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/orphan.c
@@ -0,0 +1,316 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Author: Adrian Hunter
+ */
+
+#include "ubifs.h"
+
+/*
+ * An orphan is an inode number whose inode node has been committed to the index
+ * with a link count of zero. That happens when an open file is deleted
+ * (unlinked) and then a commit is run. In the normal course of events the inode
+ * would be deleted when the file is closed. However in the case of an unclean
+ * unmount, orphans need to be accounted for. After an unclean unmount, the
+ * orphans' inodes must be deleted which means either scanning the entire index
+ * looking for them, or keeping a list on flash somewhere. This unit implements
+ * the latter approach.
+ *
+ * The orphan area is a fixed number of LEBs situated between the LPT area and
+ * the main area. The number of orphan area LEBs is specified when the file
+ * system is created. The minimum number is 1. The size of the orphan area
+ * should be so that it can hold the maximum number of orphans that are expected
+ * to ever exist at one time.
+ *
+ * The number of orphans that can fit in a LEB is:
+ *
+ * (c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64)
+ *
+ * For example: a 15872 byte LEB can fit 1980 orphans so 1 LEB may be enough.
+ *
+ * Orphans are accumulated in a rb-tree. When an inode's link count drops to
+ * zero, the inode number is added to the rb-tree. It is removed from the tree
+ * when the inode is deleted. Any new orphans that are in the orphan tree when
+ * the commit is run, are written to the orphan area in 1 or more orphan nodes.
+ * If the orphan area is full, it is consolidated to make space. There is
+ * always enough space because validation prevents the user from creating more
+ * than the maximum number of orphans allowed.
+ */
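The capacity formula above can be checked with a small standalone sketch. The 32-byte orphan node header used below is an assumed illustrative value chosen to reproduce the 15872-byte example in the comment; the real code uses UBIFS_ORPH_NODE_SZ.

/* Standalone sketch of the orphan-area capacity arithmetic; header size assumed. */
#include <stdio.h>
#include <stdint.h>

#define ORPH_NODE_HDR_SZ 32	/* assumed orphan node header size */

static int orphans_per_leb(int leb_size)
{
	/* each orphan is recorded as one 64-bit inode number */
	return (leb_size - ORPH_NODE_HDR_SZ) / (int)sizeof(uint64_t);
}

int main(void)
{
	int leb_size = 15872;	/* example LEB size from the comment */
	int per_leb = orphans_per_leb(leb_size);

	printf("%d-byte LEB holds %d orphans per node\n", leb_size, per_leb);
	/* tot_avail_orphs() reserves half the area for new orphans */
	printf("usable orphans with 1 LEB: %d\n", per_leb / 2);
	return 0;
}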
+
+/**
+ * tot_avail_orphs - calculate total space.
+ * @c: UBIFS file-system description object
+ *
+ * This function returns the number of orphans that can be written in half
+ * the total space. That leaves half the space for adding new orphans.
+ */
+static int tot_avail_orphs(struct ubifs_info *c)
+{
+ int avail_lebs, avail;
+
+ avail_lebs = c->orph_lebs;
+ avail = avail_lebs *
+ ((c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64));
+ return avail / 2;
+}
+
+/**
+ * ubifs_clear_orphans - erase all LEBs used for orphans.
+ * @c: UBIFS file-system description object
+ *
+ * If recovery is not required, then the orphans from the previous session
+ * are not needed. This function locates the LEBs used to record
+ * orphans, and un-maps them.
+ */
+int ubifs_clear_orphans(struct ubifs_info *c)
+{
+ int lnum, err;
+
+ for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
+ err = ubifs_leb_unmap(c, lnum);
+ if (err)
+ return err;
+ }
+ c->ohead_lnum = c->orph_first;
+ c->ohead_offs = 0;
+ return 0;
+}
+
+/**
+ * insert_dead_orphan - insert an orphan.
+ * @c: UBIFS file-system description object
+ * @inum: orphan inode number
+ *
+ * This function is a helper to the 'do_kill_orphans()' function. The orphan
+ * must be kept until the next commit, so it is added to the rb-tree and the
+ * deletion list.
+ */
+static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
+{
+ struct ubifs_orphan *orphan, *o;
+ struct rb_node **p, *parent = NULL;
+
+ orphan = kzalloc(sizeof(struct ubifs_orphan), GFP_KERNEL);
+ if (!orphan)
+ return -ENOMEM;
+ orphan->inum = inum;
+
+ p = &c->orph_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ o = rb_entry(parent, struct ubifs_orphan, rb);
+ if (inum < o->inum)
+ p = &(*p)->rb_left;
+ else if (inum > o->inum)
+ p = &(*p)->rb_right;
+ else {
+ /* Already added - no problem */
+ kfree(orphan);
+ return 0;
+ }
+ }
+ c->tot_orphans += 1;
+ rb_link_node(&orphan->rb, parent, p);
+ rb_insert_color(&orphan->rb, &c->orph_tree);
+ list_add_tail(&orphan->list, &c->orph_list);
+ orphan->dnext = c->orph_dnext;
+ c->orph_dnext = orphan;
+ dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
+ c->new_orphans, c->tot_orphans);
+ return 0;
+}
+
+/**
+ * do_kill_orphans - remove orphan inodes from the index.
+ * @c: UBIFS file-system description object
+ * @sleb: scanned LEB
+ * @last_cmt_no: cmt_no of last orphan node read is passed and returned here
+ * @outofdate: whether the LEB is out of date is returned here
+ * @last_flagged: whether the end orphan node is encountered
+ *
+ * This function is a helper to the 'kill_orphans()' function. It goes through
+ * every orphan node in a LEB and for every inode number recorded, removes
+ * all keys for that inode from the TNC.
+ */
+static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ unsigned long long *last_cmt_no, int *outofdate,
+ int *last_flagged)
+{
+ struct ubifs_scan_node *snod;
+ struct ubifs_orph_node *orph;
+ unsigned long long cmt_no;
+ ino_t inum;
+ int i, n, err, first = 1;
+
+ list_for_each_entry(snod, &sleb->nodes, list) {
+ if (snod->type != UBIFS_ORPH_NODE) {
+ ubifs_err("invalid node type %d in orphan area at "
+ "%d:%d", snod->type, sleb->lnum, snod->offs);
+ dbg_dump_node(c, snod->node);
+ return -EINVAL;
+ }
+
+ orph = snod->node;
+
+ /* Check commit number */
+ cmt_no = le64_to_cpu(orph->cmt_no) & LLONG_MAX;
+ /*
+ * The commit number on the master node may be less, because
+ * of a failed commit. If there are several failed commits in a
+ * row, the commit number written on orphan nodes will continue
+ * to increase (because the commit number is adjusted here) even
+ * though the commit number on the master node stays the same
+ * because the master node has not been re-written.
+ */
+ if (cmt_no > c->cmt_no)
+ c->cmt_no = cmt_no;
+ if (cmt_no < *last_cmt_no && *last_flagged) {
+ /*
+ * The last orphan node had a higher commit number and
+ * was flagged as the last written for that commit
+ * number. That makes this orphan node out of date.
+ */
+ if (!first) {
+ ubifs_err("out of order commit number %llu in "
+ "orphan node at %d:%d",
+ cmt_no, sleb->lnum, snod->offs);
+ dbg_dump_node(c, snod->node);
+ return -EINVAL;
+ }
+ dbg_rcvry("out of date LEB %d", sleb->lnum);
+ *outofdate = 1;
+ return 0;
+ }
+
+ if (first)
+ first = 0;
+
+ n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
+ for (i = 0; i < n; i++) {
+ inum = le64_to_cpu(orph->inos[i]);
+ dbg_rcvry("deleting orphaned inode %lu",
+ (unsigned long)inum);
+ err = ubifs_tnc_remove_ino(c, inum);
+ if (err)
+ return err;
+ err = insert_dead_orphan(c, inum);
+ if (err)
+ return err;
+ }
+
+ *last_cmt_no = cmt_no;
+ if (le64_to_cpu(orph->cmt_no) & (1ULL << 63)) {
+ dbg_rcvry("last orph node for commit %llu at %d:%d",
+ cmt_no, sleb->lnum, snod->offs);
+ *last_flagged = 1;
+ } else
+ *last_flagged = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * kill_orphans - remove all orphan inodes from the index.
+ * @c: UBIFS file-system description object
+ *
+ * If recovery is required, then orphan inodes recorded during the previous
+ * session (which ended with an unclean unmount) must be deleted from the index.
+ * This is done by updating the TNC, but since the index is not updated until
+ * the next commit, the LEBs where the orphan information is recorded are not
+ * erased until the next commit.
+ */
+static int kill_orphans(struct ubifs_info *c)
+{
+ unsigned long long last_cmt_no = 0;
+ int lnum, err = 0, outofdate = 0, last_flagged = 0;
+
+ c->ohead_lnum = c->orph_first;
+ c->ohead_offs = 0;
+ /* Check no-orphans flag and skip this if no orphans */
+ if (c->no_orphs) {
+ dbg_rcvry("no orphans");
+ return 0;
+ }
+ /*
+ * Orph nodes always start at c->orph_first and are written to each
+ * successive LEB in turn. Generally unused LEBs will have been unmapped
+ * but may contain out of date orphan nodes if the unmap didn't go
+ * through. In addition, the last orphan node written for each commit is
+ * marked (top bit of orph->cmt_no is set to 1). It is possible that
+ * there are orphan nodes from the next commit (i.e. the commit did not
+ * complete successfully). In that case, no orphans will have been lost
+ * due to the way that orphans are written, and any orphans added will
+ * be valid orphans anyway and so can be deleted.
+ */
+ for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
+ struct ubifs_scan_leb *sleb;
+
+ dbg_rcvry("LEB %d", lnum);
+ sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+ if (IS_ERR(sleb)) {
+ sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
+ if (IS_ERR(sleb)) {
+ err = PTR_ERR(sleb);
+ break;
+ }
+ }
+ err = do_kill_orphans(c, sleb, &last_cmt_no, &outofdate,
+ &last_flagged);
+ if (err || outofdate) {
+ ubifs_scan_destroy(sleb);
+ break;
+ }
+ if (sleb->endpt) {
+ c->ohead_lnum = lnum;
+ c->ohead_offs = sleb->endpt;
+ }
+ ubifs_scan_destroy(sleb);
+ }
+ return err;
+}
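The comment inside kill_orphans() relies on the top bit of the 64-bit commit number marking the last orphan node written for a commit, which do_kill_orphans() strips with LLONG_MAX. A minimal standalone sketch of that encoding, independent of the UBIFS structures:

/* Minimal sketch of the cmt_no flag encoding used by orphan nodes. */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define LAST_NODE_FLAG (1ULL << 63)

int main(void)
{
	uint64_t cmt_no = 42;
	uint64_t on_flash = cmt_no | LAST_NODE_FLAG;	/* as written to flash */

	/* as read back: strip the flag bit, then test it separately */
	uint64_t decoded = on_flash & LLONG_MAX;
	int last = (on_flash & LAST_NODE_FLAG) != 0;

	printf("commit %llu, last-node flag %d\n",
	       (unsigned long long)decoded, last);
	return 0;
}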
+
+/**
+ * ubifs_mount_orphans - delete orphan inodes and erase LEBs that recorded them.
+ * @c: UBIFS file-system description object
+ * @unclean: indicates recovery from unclean unmount
+ * @read_only: indicates read only mount
+ *
+ * This function is called when mounting to erase orphans from the previous
+ * session. If UBIFS was not unmounted cleanly, then the inodes recorded as
+ * orphans are deleted.
+ */
+int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only)
+{
+ int err = 0;
+
+ c->max_orphans = tot_avail_orphs(c);
+
+ if (!read_only) {
+ c->orph_buf = vmalloc(c->leb_size);
+ if (!c->orph_buf)
+ return -ENOMEM;
+ }
+
+ if (unclean)
+ err = kill_orphans(c);
+ else if (!read_only)
+ err = ubifs_clear_orphans(c);
+
+ return err;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/recovery.c b/qemu/roms/u-boot/fs/ubifs/recovery.c
new file mode 100644
index 000000000..744465005
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/recovery.c
@@ -0,0 +1,1225 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements functions needed to recover from unclean un-mounts.
+ * When UBIFS is mounted, it checks a flag on the master node to determine if
+ * an un-mount was completed successfully. If not, the process of mounting
+ * incorporates additional checking and fixing of on-flash data structures.
+ * UBIFS always cleans away all remnants of an unclean un-mount, so that
+ * errors do not accumulate. However UBIFS defers recovery if it is mounted
+ * read-only, and the flash is not modified in that case.
+ */
+
+#include "ubifs.h"
+
+/**
+ * is_empty - determine whether a buffer is empty (contains all 0xff).
+ * @buf: buffer to check
+ * @len: length of buffer
+ *
+ * This function returns %1 if the buffer is empty (contains all 0xff), otherwise
+ * %0 is returned.
+ */
+static int is_empty(void *buf, int len)
+{
+ uint8_t *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (*p++ != 0xff)
+ return 0;
+ return 1;
+}
+
+/**
+ * get_master_node - get the last valid master node allowing for corruption.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number
+ * @pbuf: buffer containing the LEB read, is returned here
+ * @mst: master node, if found, is returned here
+ * @cor: corruption, if found, is returned here
+ *
+ * This function allocates a buffer, reads the LEB into it, and finds and
+ * returns the last valid master node allowing for one area of corruption.
+ * The corrupt area, if there is one, must be consistent with the assumption
+ * that it is the result of an unclean unmount while the master node was being
+ * written. Under those circumstances, it is valid to use the previously written
+ * master node.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
+ struct ubifs_mst_node **mst, void **cor)
+{
+ const int sz = c->mst_node_alsz;
+ int err, offs, len;
+ void *sbuf, *buf;
+
+ sbuf = vmalloc(c->leb_size);
+ if (!sbuf)
+ return -ENOMEM;
+
+ err = ubi_read(c->ubi, lnum, sbuf, 0, c->leb_size);
+ if (err && err != -EBADMSG)
+ goto out_free;
+
+ /* Find the first position that is definitely not a node */
+ offs = 0;
+ buf = sbuf;
+ len = c->leb_size;
+ while (offs + UBIFS_MST_NODE_SZ <= c->leb_size) {
+ struct ubifs_ch *ch = buf;
+
+ if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
+ break;
+ offs += sz;
+ buf += sz;
+ len -= sz;
+ }
+ /* See if there was a valid master node before that */
+ if (offs) {
+ int ret;
+
+ offs -= sz;
+ buf -= sz;
+ len += sz;
+ ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
+ if (ret != SCANNED_A_NODE && offs) {
+ /* Could have been corruption so check one place back */
+ offs -= sz;
+ buf -= sz;
+ len += sz;
+ ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
+ if (ret != SCANNED_A_NODE)
+ /*
+ * We accept only one area of corruption because
+ * we are assuming that it was caused while
+ * trying to write a master node.
+ */
+ goto out_err;
+ }
+ if (ret == SCANNED_A_NODE) {
+ struct ubifs_ch *ch = buf;
+
+ if (ch->node_type != UBIFS_MST_NODE)
+ goto out_err;
+ dbg_rcvry("found a master node at %d:%d", lnum, offs);
+ *mst = buf;
+ offs += sz;
+ buf += sz;
+ len -= sz;
+ }
+ }
+ /* Check for corruption */
+ if (offs < c->leb_size) {
+ if (!is_empty(buf, min_t(int, len, sz))) {
+ *cor = buf;
+ dbg_rcvry("found corruption at %d:%d", lnum, offs);
+ }
+ offs += sz;
+ buf += sz;
+ len -= sz;
+ }
+ /* Check remaining empty space */
+ if (offs < c->leb_size)
+ if (!is_empty(buf, len))
+ goto out_err;
+ *pbuf = sbuf;
+ return 0;
+
+out_err:
+ err = -EINVAL;
+out_free:
+ vfree(sbuf);
+ *mst = NULL;
+ *cor = NULL;
+ return err;
+}
+
+/**
+ * write_rcvrd_mst_node - write recovered master node.
+ * @c: UBIFS file-system description object
+ * @mst: master node
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int write_rcvrd_mst_node(struct ubifs_info *c,
+ struct ubifs_mst_node *mst)
+{
+ int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz;
+ __le32 save_flags;
+
+ dbg_rcvry("recovery");
+
+ save_flags = mst->flags;
+ mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
+
+ ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
+ err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
+ if (err)
+ goto out;
+ err = ubi_leb_change(c->ubi, lnum + 1, mst, sz, UBI_SHORTTERM);
+ if (err)
+ goto out;
+out:
+ mst->flags = save_flags;
+ return err;
+}
+
+/**
+ * ubifs_recover_master_node - recover the master node.
+ * @c: UBIFS file-system description object
+ *
+ * This function recovers the master node from corruption that may occur due to
+ * an unclean unmount.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_recover_master_node(struct ubifs_info *c)
+{
+ void *buf1 = NULL, *buf2 = NULL, *cor1 = NULL, *cor2 = NULL;
+ struct ubifs_mst_node *mst1 = NULL, *mst2 = NULL, *mst;
+ const int sz = c->mst_node_alsz;
+ int err, offs1, offs2;
+
+ dbg_rcvry("recovery");
+
+ err = get_master_node(c, UBIFS_MST_LNUM, &buf1, &mst1, &cor1);
+ if (err)
+ goto out_free;
+
+ err = get_master_node(c, UBIFS_MST_LNUM + 1, &buf2, &mst2, &cor2);
+ if (err)
+ goto out_free;
+
+ if (mst1) {
+ offs1 = (void *)mst1 - buf1;
+ if ((le32_to_cpu(mst1->flags) & UBIFS_MST_RCVRY) &&
+ (offs1 == 0 && !cor1)) {
+ /*
+ * mst1 was written by recovery at offset 0 with no
+ * corruption.
+ */
+ dbg_rcvry("recovery recovery");
+ mst = mst1;
+ } else if (mst2) {
+ offs2 = (void *)mst2 - buf2;
+ if (offs1 == offs2) {
+ /* Same offset, so must be the same */
+ if (memcmp((void *)mst1 + UBIFS_CH_SZ,
+ (void *)mst2 + UBIFS_CH_SZ,
+ UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
+ goto out_err;
+ mst = mst1;
+ } else if (offs2 + sz == offs1) {
+ /* 1st LEB was written, 2nd was not */
+ if (cor1)
+ goto out_err;
+ mst = mst1;
+ } else if (offs1 == 0 && offs2 + sz >= c->leb_size) {
+ /* 1st LEB was unmapped and written, 2nd not */
+ if (cor1)
+ goto out_err;
+ mst = mst1;
+ } else
+ goto out_err;
+ } else {
+ /*
+ * 2nd LEB was unmapped and about to be written, so
+ * there must be only one master node in the first LEB
+ * and no corruption.
+ */
+ if (offs1 != 0 || cor1)
+ goto out_err;
+ mst = mst1;
+ }
+ } else {
+ if (!mst2)
+ goto out_err;
+ /*
+ * 1st LEB was unmapped and about to be written, so there must
+ * be no room left in 2nd LEB.
+ */
+ offs2 = (void *)mst2 - buf2;
+ if (offs2 + sz + sz <= c->leb_size)
+ goto out_err;
+ mst = mst2;
+ }
+
+ dbg_rcvry("recovered master node from LEB %d",
+ (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));
+
+ memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
+
+ if ((c->vfs_sb->s_flags & MS_RDONLY)) {
+ /* Read-only mode. Keep a copy for switching to rw mode */
+ c->rcvrd_mst_node = kmalloc(sz, GFP_KERNEL);
+ if (!c->rcvrd_mst_node) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
+ }
+
+ vfree(buf2);
+ vfree(buf1);
+
+ return 0;
+
+out_err:
+ err = -EINVAL;
+out_free:
+ ubifs_err("failed to recover master node");
+ if (mst1) {
+ dbg_err("dumping first master node");
+ dbg_dump_node(c, mst1);
+ }
+ if (mst2) {
+ dbg_err("dumping second master node");
+ dbg_dump_node(c, mst2);
+ }
+ vfree(buf2);
+ vfree(buf1);
+ return err;
+}
+
+/**
+ * ubifs_write_rcvrd_mst_node - write the recovered master node.
+ * @c: UBIFS file-system description object
+ *
+ * This function writes the master node that was recovered during mounting in
+ * read-only mode and must now be written because we are remounting rw.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_write_rcvrd_mst_node(struct ubifs_info *c)
+{
+ int err;
+
+ if (!c->rcvrd_mst_node)
+ return 0;
+ c->rcvrd_mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
+ err = write_rcvrd_mst_node(c, c->rcvrd_mst_node);
+ if (err)
+ return err;
+ kfree(c->rcvrd_mst_node);
+ c->rcvrd_mst_node = NULL;
+ return 0;
+}
+
+/**
+ * is_last_write - determine if an offset was in the last write to a LEB.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to check
+ * @offs: offset to check
+ *
+ * This function returns %1 if @offs was in the last write to the LEB whose data
+ * is in @buf, otherwise %0 is returned. The determination is made by checking
+ * for subsequent empty space starting from the next min_io_size boundary (or a
+ * bit less than the common header size if min_io_size is one).
+ */
+static int is_last_write(const struct ubifs_info *c, void *buf, int offs)
+{
+ int empty_offs;
+ int check_len;
+ uint8_t *p;
+
+ if (c->min_io_size == 1) {
+ check_len = c->leb_size - offs;
+ p = buf + check_len;
+ for (; check_len > 0; check_len--)
+ if (*--p != 0xff)
+ break;
+ /*
+ * 'check_len' is the size of the corruption which cannot be
+ * more than the size of 1 node if it was caused by an unclean
+ * unmount.
+ */
+ if (check_len > UBIFS_MAX_NODE_SZ)
+ return 0;
+ return 1;
+ }
+
+ /*
+ * Round up to the next c->min_io_size boundary i.e. 'offs' is in the
+ * last wbuf written. After that should be empty space.
+ */
+ empty_offs = ALIGN(offs + 1, c->min_io_size);
+ check_len = c->leb_size - empty_offs;
+ p = buf + empty_offs - offs;
+
+ for (; check_len > 0; check_len--)
+ if (*p++ != 0xff)
+ return 0;
+ return 1;
+}
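To make the boundary check above concrete, here is a standalone sketch of the rounding step; the min_io_size, LEB size and offset are arbitrary illustrative values, and ALIGN_UP stands in for the kernel's ALIGN macro.

/* Standalone illustration of the min_io_size rounding done by is_last_write(). */
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int min_io_size = 2048;	/* assumed NAND page size */
	int leb_size = 126976;	/* assumed LEB size */
	int offs = 5000;	/* offset being checked */

	/* first offset past the write unit containing offs; all 0xff expected after it */
	int empty_offs = ALIGN_UP(offs + 1, min_io_size);
	int check_len = leb_size - empty_offs;

	printf("empty space expected from offset %d, %d bytes to check\n",
	       empty_offs, check_len);
	return 0;
}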
+
+/**
+ * clean_buf - clean the data from an LEB sitting in a buffer.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to clean
+ * @lnum: LEB number to clean
+ * @offs: offset from which to clean
+ * @len: length of buffer
+ *
+ * This function pads up to the next min_io_size boundary (if there is one) and
+ * sets empty space to all 0xff. @buf, @offs and @len are updated to the next
+ * min_io_size boundary (if there is one).
+ */
+static void clean_buf(const struct ubifs_info *c, void **buf, int lnum,
+ int *offs, int *len)
+{
+ int empty_offs, pad_len;
+
+ lnum = lnum;
+ dbg_rcvry("cleaning corruption at %d:%d", lnum, *offs);
+
+ if (c->min_io_size == 1) {
+ memset(*buf, 0xff, c->leb_size - *offs);
+ return;
+ }
+
+ ubifs_assert(!(*offs & 7));
+ empty_offs = ALIGN(*offs, c->min_io_size);
+ pad_len = empty_offs - *offs;
+ ubifs_pad(c, *buf, pad_len);
+ *offs += pad_len;
+ *buf += pad_len;
+ *len -= pad_len;
+ memset(*buf, 0xff, c->leb_size - empty_offs);
+}
+
+/**
+ * no_more_nodes - determine if there are no more nodes in a buffer.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to check
+ * @len: length of buffer
+ * @lnum: LEB number of the LEB from which @buf was read
+ * @offs: offset from which @buf was read
+ *
+ * This function ensures that the corrupted node at @offs is the last thing
+ * written to a LEB. This function returns %1 if more data is not found and
+ * %0 if more data is found.
+ */
+static int no_more_nodes(const struct ubifs_info *c, void *buf, int len,
+ int lnum, int offs)
+{
+ struct ubifs_ch *ch = buf;
+ int skip, dlen = le32_to_cpu(ch->len);
+
+ /* Check for empty space after the corrupt node's common header */
+ skip = ALIGN(offs + UBIFS_CH_SZ, c->min_io_size) - offs;
+ if (is_empty(buf + skip, len - skip))
+ return 1;
+ /*
+ * The area after the common header size is not empty, so the common
+ * header must be intact. Check it.
+ */
+ if (ubifs_check_node(c, buf, lnum, offs, 1, 0) != -EUCLEAN) {
+ dbg_rcvry("unexpected bad common header at %d:%d", lnum, offs);
+ return 0;
+ }
+ /* Now we know the corrupt node's length we can skip over it */
+ skip = ALIGN(offs + dlen, c->min_io_size) - offs;
+ /* After which there should be empty space */
+ if (is_empty(buf + skip, len - skip))
+ return 1;
+ dbg_rcvry("unexpected data at %d:%d", lnum, offs + skip);
+ return 0;
+}
+
+/**
+ * fix_unclean_leb - fix an unclean LEB.
+ * @c: UBIFS file-system description object
+ * @sleb: scanned LEB information
+ * @start: offset where scan started
+ */
+static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ int start)
+{
+ int lnum = sleb->lnum, endpt = start;
+
+ /* Get the end offset of the last node we are keeping */
+ if (!list_empty(&sleb->nodes)) {
+ struct ubifs_scan_node *snod;
+
+ snod = list_entry(sleb->nodes.prev,
+ struct ubifs_scan_node, list);
+ endpt = snod->offs + snod->len;
+ }
+
+ if ((c->vfs_sb->s_flags & MS_RDONLY) && !c->remounting_rw) {
+ /* Add to recovery list */
+ struct ubifs_unclean_leb *ucleb;
+
+ dbg_rcvry("need to fix LEB %d start %d endpt %d",
+ lnum, start, sleb->endpt);
+ ucleb = kzalloc(sizeof(struct ubifs_unclean_leb), GFP_NOFS);
+ if (!ucleb)
+ return -ENOMEM;
+ ucleb->lnum = lnum;
+ ucleb->endpt = endpt;
+ list_add_tail(&ucleb->list, &c->unclean_leb_list);
+ }
+ return 0;
+}
+
+/**
+ * drop_incomplete_group - drop nodes from an incomplete group.
+ * @sleb: scanned LEB information
+ * @offs: offset of dropped nodes is returned here
+ *
+ * This function returns %1 if nodes are dropped and %0 otherwise.
+ */
+static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
+{
+ int dropped = 0;
+
+ while (!list_empty(&sleb->nodes)) {
+ struct ubifs_scan_node *snod;
+ struct ubifs_ch *ch;
+
+ snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
+ list);
+ ch = snod->node;
+ if (ch->group_type != UBIFS_IN_NODE_GROUP)
+ return dropped;
+ dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
+ *offs = snod->offs;
+ list_del(&snod->list);
+ kfree(snod);
+ sleb->nodes_cnt -= 1;
+ dropped = 1;
+ }
+ return dropped;
+}
+
+/**
+ * ubifs_recover_leb - scan and recover a LEB.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number
+ * @offs: offset
+ * @sbuf: LEB-sized buffer to use
+ * @grouped: nodes may be grouped for recovery
+ *
+ * This function does a scan of a LEB, but caters for errors that might have
+ * been caused by the unclean unmount from which we are attempting to recover.
+ *
+ * This function returns the scanned LEB information on success and an error
+ * pointer on failure.
+ */
+struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
+ int offs, void *sbuf, int grouped)
+{
+ int err, len = c->leb_size - offs, need_clean = 0, quiet = 1;
+ int empty_chkd = 0, start = offs;
+ struct ubifs_scan_leb *sleb;
+ void *buf = sbuf + offs;
+
+ dbg_rcvry("%d:%d", lnum, offs);
+
+ sleb = ubifs_start_scan(c, lnum, offs, sbuf);
+ if (IS_ERR(sleb))
+ return sleb;
+
+ if (sleb->ecc)
+ need_clean = 1;
+
+ while (len >= 8) {
+ int ret;
+
+ dbg_scan("look at LEB %d:%d (%d bytes left)",
+ lnum, offs, len);
+
+ cond_resched();
+
+ /*
+ * Scan quietly until there is an error from which we cannot
+ * recover
+ */
+ ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
+
+ if (ret == SCANNED_A_NODE) {
+ /* A valid node, and not a padding node */
+ struct ubifs_ch *ch = buf;
+ int node_len;
+
+ err = ubifs_add_snod(c, sleb, buf, offs);
+ if (err)
+ goto error;
+ node_len = ALIGN(le32_to_cpu(ch->len), 8);
+ offs += node_len;
+ buf += node_len;
+ len -= node_len;
+ continue;
+ }
+
+ if (ret > 0) {
+ /* Padding bytes or a valid padding node */
+ offs += ret;
+ buf += ret;
+ len -= ret;
+ continue;
+ }
+
+ if (ret == SCANNED_EMPTY_SPACE) {
+ if (!is_empty(buf, len)) {
+ if (!is_last_write(c, buf, offs))
+ break;
+ clean_buf(c, &buf, lnum, &offs, &len);
+ need_clean = 1;
+ }
+ empty_chkd = 1;
+ break;
+ }
+
+ if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE)
+ if (is_last_write(c, buf, offs)) {
+ clean_buf(c, &buf, lnum, &offs, &len);
+ need_clean = 1;
+ empty_chkd = 1;
+ break;
+ }
+
+ if (ret == SCANNED_A_CORRUPT_NODE)
+ if (no_more_nodes(c, buf, len, lnum, offs)) {
+ clean_buf(c, &buf, lnum, &offs, &len);
+ need_clean = 1;
+ empty_chkd = 1;
+ break;
+ }
+
+ if (quiet) {
+ /* Redo the last scan but noisily */
+ quiet = 0;
+ continue;
+ }
+
+ switch (ret) {
+ case SCANNED_GARBAGE:
+ dbg_err("garbage");
+ goto corrupted;
+ case SCANNED_A_CORRUPT_NODE:
+ case SCANNED_A_BAD_PAD_NODE:
+ dbg_err("bad node");
+ goto corrupted;
+ default:
+ dbg_err("unknown");
+ goto corrupted;
+ }
+ }
+
+ if (!empty_chkd && !is_empty(buf, len)) {
+ if (is_last_write(c, buf, offs)) {
+ clean_buf(c, &buf, lnum, &offs, &len);
+ need_clean = 1;
+ } else {
+ ubifs_err("corrupt empty space at LEB %d:%d",
+ lnum, offs);
+ goto corrupted;
+ }
+ }
+
+ /* Drop nodes from incomplete group */
+ if (grouped && drop_incomplete_group(sleb, &offs)) {
+ buf = sbuf + offs;
+ len = c->leb_size - offs;
+ clean_buf(c, &buf, lnum, &offs, &len);
+ need_clean = 1;
+ }
+
+ if (offs % c->min_io_size) {
+ clean_buf(c, &buf, lnum, &offs, &len);
+ need_clean = 1;
+ }
+
+ ubifs_end_scan(c, sleb, lnum, offs);
+
+ if (need_clean) {
+ err = fix_unclean_leb(c, sleb, start);
+ if (err)
+ goto error;
+ }
+
+ return sleb;
+
+corrupted:
+ ubifs_scanned_corruption(c, lnum, offs, buf);
+ err = -EUCLEAN;
+error:
+ ubifs_err("LEB %d scanning failed", lnum);
+ ubifs_scan_destroy(sleb);
+ return ERR_PTR(err);
+}
+
+/**
+ * get_cs_sqnum - get commit start sequence number.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number of commit start node
+ * @offs: offset of commit start node
+ * @cs_sqnum: commit start sequence number is returned here
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
+ unsigned long long *cs_sqnum)
+{
+ struct ubifs_cs_node *cs_node = NULL;
+ int err, ret;
+
+ dbg_rcvry("at %d:%d", lnum, offs);
+ cs_node = kmalloc(UBIFS_CS_NODE_SZ, GFP_KERNEL);
+ if (!cs_node)
+ return -ENOMEM;
+ if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
+ goto out_err;
+ err = ubi_read(c->ubi, lnum, (void *)cs_node, offs, UBIFS_CS_NODE_SZ);
+ if (err && err != -EBADMSG)
+ goto out_free;
+ ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
+ if (ret != SCANNED_A_NODE) {
+ dbg_err("Not a valid node");
+ goto out_err;
+ }
+ if (cs_node->ch.node_type != UBIFS_CS_NODE) {
+ dbg_err("Node a CS node, type is %d", cs_node->ch.node_type);
+ goto out_err;
+ }
+ if (le64_to_cpu(cs_node->cmt_no) != c->cmt_no) {
+ dbg_err("CS node cmt_no %llu != current cmt_no %llu",
+ (unsigned long long)le64_to_cpu(cs_node->cmt_no),
+ c->cmt_no);
+ goto out_err;
+ }
+ *cs_sqnum = le64_to_cpu(cs_node->ch.sqnum);
+ dbg_rcvry("commit start sqnum %llu", *cs_sqnum);
+ kfree(cs_node);
+ return 0;
+
+out_err:
+ err = -EINVAL;
+out_free:
+ ubifs_err("failed to get CS sqnum");
+ kfree(cs_node);
+ return err;
+}
+
+/**
+ * ubifs_recover_log_leb - scan and recover a log LEB.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number
+ * @offs: offset
+ * @sbuf: LEB-sized buffer to use
+ *
+ * This function does a scan of a LEB, but caters for errors that might have
+ * been caused by the unclean unmount from which we are attempting to recover.
+ *
+ * This function returns the scanned LEB information on success and an error
+ * pointer on failure.
+ */
+struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
+ int offs, void *sbuf)
+{
+ struct ubifs_scan_leb *sleb;
+ int next_lnum;
+
+ dbg_rcvry("LEB %d", lnum);
+ next_lnum = lnum + 1;
+ if (next_lnum >= UBIFS_LOG_LNUM + c->log_lebs)
+ next_lnum = UBIFS_LOG_LNUM;
+ if (next_lnum != c->ltail_lnum) {
+ /*
+ * We can only recover at the end of the log, so check that the
+ * next log LEB is empty or out of date.
+ */
+ sleb = ubifs_scan(c, next_lnum, 0, sbuf);
+ if (IS_ERR(sleb))
+ return sleb;
+ if (sleb->nodes_cnt) {
+ struct ubifs_scan_node *snod;
+ unsigned long long cs_sqnum = c->cs_sqnum;
+
+ snod = list_entry(sleb->nodes.next,
+ struct ubifs_scan_node, list);
+ if (cs_sqnum == 0) {
+ int err;
+
+ err = get_cs_sqnum(c, lnum, offs, &cs_sqnum);
+ if (err) {
+ ubifs_scan_destroy(sleb);
+ return ERR_PTR(err);
+ }
+ }
+ if (snod->sqnum > cs_sqnum) {
+ ubifs_err("unrecoverable log corruption "
+ "in LEB %d", lnum);
+ ubifs_scan_destroy(sleb);
+ return ERR_PTR(-EUCLEAN);
+ }
+ }
+ ubifs_scan_destroy(sleb);
+ }
+ return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
+}
+
+/**
+ * recover_head - recover a head.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number of head to recover
+ * @offs: offset of head to recover
+ * @sbuf: LEB-sized buffer to use
+ *
+ * This function ensures that there is no data on the flash at a head location.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int recover_head(const struct ubifs_info *c, int lnum, int offs,
+ void *sbuf)
+{
+ int len, err, need_clean = 0;
+
+ if (c->min_io_size > 1)
+ len = c->min_io_size;
+ else
+ len = 512;
+ if (offs + len > c->leb_size)
+ len = c->leb_size - offs;
+
+ if (!len)
+ return 0;
+
+ /* Read at the head location and check it is empty flash */
+ err = ubi_read(c->ubi, lnum, sbuf, offs, len);
+ if (err)
+ need_clean = 1;
+ else {
+ uint8_t *p = sbuf;
+
+ while (len--)
+ if (*p++ != 0xff) {
+ need_clean = 1;
+ break;
+ }
+ }
+
+ if (need_clean) {
+ dbg_rcvry("cleaning head at %d:%d", lnum, offs);
+ if (offs == 0)
+ return ubifs_leb_unmap(c, lnum);
+ err = ubi_read(c->ubi, lnum, sbuf, 0, offs);
+ if (err)
+ return err;
+ return ubi_leb_change(c->ubi, lnum, sbuf, offs, UBI_UNKNOWN);
+ }
+
+ return 0;
+}
+
+/**
+ * ubifs_recover_inl_heads - recover index and LPT heads.
+ * @c: UBIFS file-system description object
+ * @sbuf: LEB-sized buffer to use
+ *
+ * This function ensures that there is no data on the flash at the index and
+ * LPT head locations.
+ *
+ * This deals with the recovery of a half-completed journal commit. UBIFS is
+ * careful never to overwrite the last version of the index or the LPT. Because
+ * the index and LPT are wandering trees, data from a half-completed commit will
+ * not be referenced anywhere in UBIFS. The data will be either in LEBs that are
+ * assumed to be empty and will be unmapped anyway before use, or in the index
+ * and LPT heads.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
+{
+ int err;
+
+ ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY) || c->remounting_rw);
+
+ dbg_rcvry("checking index head at %d:%d", c->ihead_lnum, c->ihead_offs);
+ err = recover_head(c, c->ihead_lnum, c->ihead_offs, sbuf);
+ if (err)
+ return err;
+
+ dbg_rcvry("checking LPT head at %d:%d", c->nhead_lnum, c->nhead_offs);
+ err = recover_head(c, c->nhead_lnum, c->nhead_offs, sbuf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * clean_an_unclean_leb - read and write a LEB to remove corruption.
+ * @c: UBIFS file-system description object
+ * @ucleb: unclean LEB information
+ * @sbuf: LEB-sized buffer to use
+ *
+ * This function reads a LEB up to a point pre-determined by the mount recovery,
+ * checks the nodes, and writes the result back to the flash, thereby cleaning
+ * off any following corruption or non-fatal ECC errors.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+static int clean_an_unclean_leb(const struct ubifs_info *c,
+ struct ubifs_unclean_leb *ucleb, void *sbuf)
+{
+ int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
+ void *buf = sbuf;
+
+ dbg_rcvry("LEB %d len %d", lnum, len);
+
+ if (len == 0) {
+ /* Nothing to read, just unmap it */
+ err = ubifs_leb_unmap(c, lnum);
+ if (err)
+ return err;
+ return 0;
+ }
+
+ err = ubi_read(c->ubi, lnum, buf, offs, len);
+ if (err && err != -EBADMSG)
+ return err;
+
+ while (len >= 8) {
+ int ret;
+
+ cond_resched();
+
+ /* Scan quietly until there is an error */
+ ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
+
+ if (ret == SCANNED_A_NODE) {
+ /* A valid node, and not a padding node */
+ struct ubifs_ch *ch = buf;
+ int node_len;
+
+ node_len = ALIGN(le32_to_cpu(ch->len), 8);
+ offs += node_len;
+ buf += node_len;
+ len -= node_len;
+ continue;
+ }
+
+ if (ret > 0) {
+ /* Padding bytes or a valid padding node */
+ offs += ret;
+ buf += ret;
+ len -= ret;
+ continue;
+ }
+
+ if (ret == SCANNED_EMPTY_SPACE) {
+ ubifs_err("unexpected empty space at %d:%d",
+ lnum, offs);
+ return -EUCLEAN;
+ }
+
+ if (quiet) {
+ /* Redo the last scan but noisily */
+ quiet = 0;
+ continue;
+ }
+
+ ubifs_scanned_corruption(c, lnum, offs, buf);
+ return -EUCLEAN;
+ }
+
+ /* Pad to min_io_size */
+ len = ALIGN(ucleb->endpt, c->min_io_size);
+ if (len > ucleb->endpt) {
+ int pad_len = len - ALIGN(ucleb->endpt, 8);
+
+ if (pad_len > 0) {
+ buf = c->sbuf + len - pad_len;
+ ubifs_pad(c, buf, pad_len);
+ }
+ }
+
+ /* Write back the LEB atomically */
+ err = ubi_leb_change(c->ubi, lnum, sbuf, len, UBI_UNKNOWN);
+ if (err)
+ return err;
+
+ dbg_rcvry("cleaned LEB %d", lnum);
+
+ return 0;
+}
+
+/**
+ * ubifs_clean_lebs - clean LEBs recovered during read-only mount.
+ * @c: UBIFS file-system description object
+ * @sbuf: LEB-sized buffer to use
+ *
+ * This function cleans the LEBs identified during recovery that need to be
+ * written but were not because UBIFS was mounted read-only. This happens when
+ * remounting to read-write mode.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf)
+{
+ dbg_rcvry("recovery");
+ while (!list_empty(&c->unclean_leb_list)) {
+ struct ubifs_unclean_leb *ucleb;
+ int err;
+
+ ucleb = list_entry(c->unclean_leb_list.next,
+ struct ubifs_unclean_leb, list);
+ err = clean_an_unclean_leb(c, ucleb, sbuf);
+ if (err)
+ return err;
+ list_del(&ucleb->list);
+ kfree(ucleb);
+ }
+ return 0;
+}
+
+/**
+ * struct size_entry - inode size information for recovery.
+ * @rb: link in the RB-tree of sizes
+ * @inum: inode number
+ * @i_size: size on inode
+ * @d_size: maximum size based on data nodes
+ * @exists: indicates whether the inode exists
+ * @inode: inode if pinned in memory awaiting rw mode to fix it
+ */
+struct size_entry {
+ struct rb_node rb;
+ ino_t inum;
+ loff_t i_size;
+ loff_t d_size;
+ int exists;
+ struct inode *inode;
+};
+
+/**
+ * add_ino - add an entry to the size tree.
+ * @c: UBIFS file-system description object
+ * @inum: inode number
+ * @i_size: size on inode
+ * @d_size: maximum size based on data nodes
+ * @exists: indicates whether the inode exists
+ */
+static int add_ino(struct ubifs_info *c, ino_t inum, loff_t i_size,
+ loff_t d_size, int exists)
+{
+ struct rb_node **p = &c->size_tree.rb_node, *parent = NULL;
+ struct size_entry *e;
+
+ while (*p) {
+ parent = *p;
+ e = rb_entry(parent, struct size_entry, rb);
+ if (inum < e->inum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ e = kzalloc(sizeof(struct size_entry), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->inum = inum;
+ e->i_size = i_size;
+ e->d_size = d_size;
+ e->exists = exists;
+
+ rb_link_node(&e->rb, parent, p);
+ rb_insert_color(&e->rb, &c->size_tree);
+
+ return 0;
+}
+
+/**
+ * find_ino - find an entry on the size tree.
+ * @c: UBIFS file-system description object
+ * @inum: inode number
+ */
+static struct size_entry *find_ino(struct ubifs_info *c, ino_t inum)
+{
+ struct rb_node *p = c->size_tree.rb_node;
+ struct size_entry *e;
+
+ while (p) {
+ e = rb_entry(p, struct size_entry, rb);
+ if (inum < e->inum)
+ p = p->rb_left;
+ else if (inum > e->inum)
+ p = p->rb_right;
+ else
+ return e;
+ }
+ return NULL;
+}
+
+/**
+ * remove_ino - remove an entry from the size tree.
+ * @c: UBIFS file-system description object
+ * @inum: inode number
+ */
+static void remove_ino(struct ubifs_info *c, ino_t inum)
+{
+ struct size_entry *e = find_ino(c, inum);
+
+ if (!e)
+ return;
+ rb_erase(&e->rb, &c->size_tree);
+ kfree(e);
+}
+
+/**
+ * ubifs_recover_size_accum - accumulate inode sizes for recovery.
+ * @c: UBIFS file-system description object
+ * @key: node key
+ * @deletion: node is for a deletion
+ * @new_size: inode size
+ *
+ * This function has two purposes:
+ * 1) to ensure there are no data nodes that fall outside the inode size
+ * 2) to ensure there are no data nodes for inodes that do not exist
+ * To accomplish those purposes, an rb-tree is constructed containing an entry
+ * for each inode number in the journal that has not been deleted, recording
+ * the size from the inode node, the maximum size of any data node (also altered
+ * by truncations) and a flag indicating an inode number for which no inode node
+ * was present in the journal.
+ *
+ * Note that there is still the possibility that there are data nodes that have
+ * been committed that are beyond the inode size; however, the only way to find
+ * them would be to scan the entire index. Alternatively, some provision could
+ * be made to record the size of inodes at the start of commit, which would seem
+ * very cumbersome for a scenario that is quite unlikely and the only negative
+ * consequence of which is wasted space.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
+ int deletion, loff_t new_size)
+{
+ ino_t inum = key_inum(c, key);
+ struct size_entry *e;
+ int err;
+
+ switch (key_type(c, key)) {
+ case UBIFS_INO_KEY:
+ if (deletion)
+ remove_ino(c, inum);
+ else {
+ e = find_ino(c, inum);
+ if (e) {
+ e->i_size = new_size;
+ e->exists = 1;
+ } else {
+ err = add_ino(c, inum, new_size, 0, 1);
+ if (err)
+ return err;
+ }
+ }
+ break;
+ case UBIFS_DATA_KEY:
+ e = find_ino(c, inum);
+ if (e) {
+ if (new_size > e->d_size)
+ e->d_size = new_size;
+ } else {
+ err = add_ino(c, inum, 0, new_size, 0);
+ if (err)
+ return err;
+ }
+ break;
+ case UBIFS_TRUN_KEY:
+ e = find_ino(c, inum);
+ if (e)
+ e->d_size = new_size;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * ubifs_recover_size - recover inode size.
+ * @c: UBIFS file-system description object
+ *
+ * This function attempts to fix inode size discrepancies identified by the
+ * 'ubifs_recover_size_accum()' function.
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_recover_size(struct ubifs_info *c)
+{
+ struct rb_node *this = rb_first(&c->size_tree);
+
+ while (this) {
+ struct size_entry *e;
+ int err;
+
+ e = rb_entry(this, struct size_entry, rb);
+ if (!e->exists) {
+ union ubifs_key key;
+
+ ino_key_init(c, &key, e->inum);
+ err = ubifs_tnc_lookup(c, &key, c->sbuf);
+ if (err && err != -ENOENT)
+ return err;
+ if (err == -ENOENT) {
+ /* Remove data nodes that have no inode */
+ dbg_rcvry("removing ino %lu",
+ (unsigned long)e->inum);
+ err = ubifs_tnc_remove_ino(c, e->inum);
+ if (err)
+ return err;
+ } else {
+ struct ubifs_ino_node *ino = c->sbuf;
+
+ e->exists = 1;
+ e->i_size = le64_to_cpu(ino->size);
+ }
+ }
+ if (e->exists && e->i_size < e->d_size) {
+ if (!e->inode && (c->vfs_sb->s_flags & MS_RDONLY)) {
+ /* Fix the inode size and pin it in memory */
+ struct inode *inode;
+
+ inode = ubifs_iget(c->vfs_sb, e->inum);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ if (inode->i_size < e->d_size) {
+ dbg_rcvry("ino %lu size %lld -> %lld",
+ (unsigned long)e->inum,
+ e->d_size, inode->i_size);
+ inode->i_size = e->d_size;
+ ubifs_inode(inode)->ui_size = e->d_size;
+ e->inode = inode;
+ this = rb_next(this);
+ continue;
+ }
+ iput(inode);
+ }
+ }
+ this = rb_next(this);
+ rb_erase(&e->rb, &c->size_tree);
+ kfree(e);
+ }
+ return 0;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/replay.c b/qemu/roms/u-boot/fs/ubifs/replay.c
new file mode 100644
index 000000000..da33a14ab
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/replay.c
@@ -0,0 +1,1070 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file contains journal replay code. It runs when the file-system is being
+ * mounted and requires no locking.
+ *
+ * The larger the journal, the longer it takes to scan it, and so the longer it
+ * takes to mount UBIFS. This is why the journal has a limited size, which may be
+ * changed depending on the system requirements. But a larger journal gives
+ * faster I/O speed because the index is written less frequently. So this is a
+ * trade-off. Also, the journal is indexed by the in-memory index (TNC), so the
+ * larger the journal, the more memory its index may consume.
+ */
+
+#include "ubifs.h"
+
+/*
+ * Replay flags.
+ *
+ * REPLAY_DELETION: node was deleted
+ * REPLAY_REF: node is a reference node
+ */
+enum {
+ REPLAY_DELETION = 1,
+ REPLAY_REF = 2,
+};
+
+/**
+ * struct replay_entry - replay tree entry.
+ * @lnum: logical eraseblock number of the node
+ * @offs: node offset
+ * @len: node length
+ * @sqnum: node sequence number
+ * @flags: replay flags
+ * @rb: links the replay tree
+ * @key: node key
+ * @nm: directory entry name
+ * @old_size: truncation old size
+ * @new_size: truncation new size
+ * @free: amount of free space in a bud
+ * @dirty: amount of dirty space in a bud from padding and deletion nodes
+ *
+ * UBIFS journal replay must compare node sequence numbers, which means it must
+ * build a tree of node information to insert into the TNC.
+ */
+struct replay_entry {
+ int lnum;
+ int offs;
+ int len;
+ unsigned long long sqnum;
+ int flags;
+ struct rb_node rb;
+ union ubifs_key key;
+ union {
+ struct qstr nm;
+ struct {
+ loff_t old_size;
+ loff_t new_size;
+ };
+ struct {
+ int free;
+ int dirty;
+ };
+ };
+};
+
+/**
+ * struct bud_entry - entry in the list of buds to replay.
+ * @list: next bud in the list
+ * @bud: bud description object
+ * @free: free bytes in the bud
+ * @sqnum: reference node sequence number
+ */
+struct bud_entry {
+ struct list_head list;
+ struct ubifs_bud *bud;
+ int free;
+ unsigned long long sqnum;
+};
+
+/**
+ * set_bud_lprops - set free and dirty space used by a bud.
+ * @c: UBIFS file-system description object
+ * @r: replay entry of bud
+ */
+static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r)
+{
+ const struct ubifs_lprops *lp;
+ int err = 0, dirty;
+
+ ubifs_get_lprops(c);
+
+ lp = ubifs_lpt_lookup_dirty(c, r->lnum);
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out;
+ }
+
+ dirty = lp->dirty;
+ if (r->offs == 0 && (lp->free != c->leb_size || lp->dirty != 0)) {
+ /*
+ * The LEB was added to the journal with a starting offset of
+ * zero which means the LEB must have been empty. The LEB
+ * property values should be lp->free == c->leb_size and
+ * lp->dirty == 0, but that is not the case. The reason is that
+ * the LEB was garbage collected. The garbage collector resets
+ * the free and dirty space without recording it anywhere except
+ * lprops, so if there is not a commit then lprops does not have
+ * that information next time the file system is mounted.
+ *
+ * We do not need to adjust free space because the scan has told
+ * us the exact value which is recorded in the replay entry as
+ * r->free.
+ *
+ * However we do need to subtract from the dirty space the
+ * amount of space that the garbage collector reclaimed, which
+ * is the whole LEB minus the amount of space that was free.
+ */
+ dbg_mnt("bud LEB %d was GC'd (%d free, %d dirty)", r->lnum,
+ lp->free, lp->dirty);
+ dbg_gc("bud LEB %d was GC'd (%d free, %d dirty)", r->lnum,
+ lp->free, lp->dirty);
+ dirty -= c->leb_size - lp->free;
+ /*
+ * If the replay order was perfect the dirty space would now be
+ * zero. The order is not perfect because the journal heads
+ * race with each other. This is not a problem but it does mean
+ * that the dirty space may temporarily exceed c->leb_size
+ * during the replay.
+ */
+ if (dirty != 0)
+ dbg_msg("LEB %d lp: %d free %d dirty "
+ "replay: %d free %d dirty", r->lnum, lp->free,
+ lp->dirty, r->free, r->dirty);
+ }
+ lp = ubifs_change_lp(c, lp, r->free, dirty + r->dirty,
+ lp->flags | LPROPS_TAKEN, 0);
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out;
+ }
+out:
+ ubifs_release_lprops(c);
+ return err;
+}
+
+/**
+ * trun_remove_range - apply a replay entry for a truncation to the TNC.
+ * @c: UBIFS file-system description object
+ * @r: replay entry of truncation
+ */
+static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
+{
+ unsigned min_blk, max_blk;
+ union ubifs_key min_key, max_key;
+ ino_t ino;
+
+ min_blk = r->new_size / UBIFS_BLOCK_SIZE;
+ if (r->new_size & (UBIFS_BLOCK_SIZE - 1))
+ min_blk += 1;
+
+ max_blk = r->old_size / UBIFS_BLOCK_SIZE;
+ if ((r->old_size & (UBIFS_BLOCK_SIZE - 1)) == 0)
+ max_blk -= 1;
+
+ ino = key_inum(c, &r->key);
+
+ data_key_init(c, &min_key, ino, min_blk);
+ data_key_init(c, &max_key, ino, max_blk);
+
+ return ubifs_tnc_remove_range(c, &min_key, &max_key);
+}
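A worked standalone example of the block-range arithmetic above; the 4096-byte block size is an assumption standing in for UBIFS_BLOCK_SIZE, and the file sizes are arbitrary.

/* Standalone sketch of the truncation block-range arithmetic; block size assumed. */
#include <stdio.h>

#define BLOCK_SIZE 4096

static void trun_range(long long old_size, long long new_size)
{
	unsigned min_blk = new_size / BLOCK_SIZE;
	unsigned max_blk;

	if (new_size & (BLOCK_SIZE - 1))
		min_blk += 1;	/* keep the partially retained block */

	max_blk = old_size / BLOCK_SIZE;
	if ((old_size & (BLOCK_SIZE - 1)) == 0)
		max_blk -= 1;	/* old size ended exactly on a block boundary */

	printf("truncate %lld -> %lld: remove data blocks %u..%u\n",
	       old_size, new_size, min_blk, max_blk);
}

int main(void)
{
	trun_range(20000, 5000);	/* expect blocks 2..4 */
	trun_range(16384, 8192);	/* expect blocks 2..3 */
	return 0;
}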
+
+/**
+ * apply_replay_entry - apply a replay entry to the TNC.
+ * @c: UBIFS file-system description object
+ * @r: replay entry to apply
+ *
+ * Apply a replay entry to the TNC.
+ */
+static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
+{
+ int err, deletion = ((r->flags & REPLAY_DELETION) != 0);
+
+ dbg_mnt("LEB %d:%d len %d flgs %d sqnum %llu %s", r->lnum,
+ r->offs, r->len, r->flags, r->sqnum, DBGKEY(&r->key));
+
+ /* Set c->replay_sqnum to help deal with dangling branches. */
+ c->replay_sqnum = r->sqnum;
+
+ if (r->flags & REPLAY_REF)
+ err = set_bud_lprops(c, r);
+ else if (is_hash_key(c, &r->key)) {
+ if (deletion)
+ err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
+ else
+ err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,
+ r->len, &r->nm);
+ } else {
+ if (deletion)
+ switch (key_type(c, &r->key)) {
+ case UBIFS_INO_KEY:
+ {
+ ino_t inum = key_inum(c, &r->key);
+
+ err = ubifs_tnc_remove_ino(c, inum);
+ break;
+ }
+ case UBIFS_TRUN_KEY:
+ err = trun_remove_range(c, r);
+ break;
+ default:
+ err = ubifs_tnc_remove(c, &r->key);
+ break;
+ }
+ else
+ err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs,
+ r->len);
+ if (err)
+ return err;
+
+ if (c->need_recovery)
+ err = ubifs_recover_size_accum(c, &r->key, deletion,
+ r->new_size);
+ }
+
+ return err;
+}
+
+/**
+ * destroy_replay_tree - destroy the replay.
+ * @c: UBIFS file-system description object
+ *
+ * Destroy the replay tree.
+ */
+static void destroy_replay_tree(struct ubifs_info *c)
+{
+ struct rb_node *this = c->replay_tree.rb_node;
+ struct replay_entry *r;
+
+ while (this) {
+ if (this->rb_left) {
+ this = this->rb_left;
+ continue;
+ } else if (this->rb_right) {
+ this = this->rb_right;
+ continue;
+ }
+ r = rb_entry(this, struct replay_entry, rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &r->rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+ if (is_hash_key(c, &r->key))
+ kfree((void *)r->nm.name);
+ kfree(r);
+ }
+ c->replay_tree = RB_ROOT;
+}
+
+/**
+ * apply_replay_tree - apply the replay tree to the TNC.
+ * @c: UBIFS file-system description object
+ *
+ * Apply the replay tree.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int apply_replay_tree(struct ubifs_info *c)
+{
+ struct rb_node *this = rb_first(&c->replay_tree);
+
+ while (this) {
+ struct replay_entry *r;
+ int err;
+
+ cond_resched();
+
+ r = rb_entry(this, struct replay_entry, rb);
+ err = apply_replay_entry(c, r);
+ if (err)
+ return err;
+ this = rb_next(this);
+ }
+ return 0;
+}
+
+/**
+ * insert_node - insert a node to the replay tree.
+ * @c: UBIFS file-system description object
+ * @lnum: node logical eraseblock number
+ * @offs: node offset
+ * @len: node length
+ * @key: node key
+ * @sqnum: sequence number
+ * @deletion: non-zero if this is a deletion
+ * @used: number of bytes in use in a LEB
+ * @old_size: truncation old size
+ * @new_size: truncation new size
+ *
+ * This function inserts a scanned non-direntry node to the replay tree. The
+ * replay tree is an RB-tree containing @struct replay_entry elements which are
+ * indexed by the sequence number. The replay tree is applied at the very end
+ * of the replay process. Since the tree is sorted in sequence number order,
+ * the older modifications are applied first. This function returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
+ union ubifs_key *key, unsigned long long sqnum,
+ int deletion, int *used, loff_t old_size,
+ loff_t new_size)
+{
+ struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
+ struct replay_entry *r;
+
+ if (key_inum(c, key) >= c->highest_inum)
+ c->highest_inum = key_inum(c, key);
+
+ dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
+ while (*p) {
+ parent = *p;
+ r = rb_entry(parent, struct replay_entry, rb);
+ if (sqnum < r->sqnum) {
+ p = &(*p)->rb_left;
+ continue;
+ } else if (sqnum > r->sqnum) {
+ p = &(*p)->rb_right;
+ continue;
+ }
+ ubifs_err("duplicate sqnum in replay");
+ return -EINVAL;
+ }
+
+ r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ if (!deletion)
+ *used += ALIGN(len, 8);
+ r->lnum = lnum;
+ r->offs = offs;
+ r->len = len;
+ r->sqnum = sqnum;
+ r->flags = (deletion ? REPLAY_DELETION : 0);
+ r->old_size = old_size;
+ r->new_size = new_size;
+ key_copy(c, key, &r->key);
+
+ rb_link_node(&r->rb, parent, p);
+ rb_insert_color(&r->rb, &c->replay_tree);
+ return 0;
+}
+
+/**
+ * insert_dent - insert a directory entry node into the replay tree.
+ * @c: UBIFS file-system description object
+ * @lnum: node logical eraseblock number
+ * @offs: node offset
+ * @len: node length
+ * @key: node key
+ * @name: directory entry name
+ * @nlen: directory entry name length
+ * @sqnum: sequence number
+ * @deletion: non-zero if this is a deletion
+ * @used: number of bytes in use in a LEB
+ *
+ * This function inserts a scanned directory entry node to the replay tree.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ *
+ * This function is also used for extended attribute entries because they are
+ * implemented as directory entry nodes.
+ */
+static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
+ union ubifs_key *key, const char *name, int nlen,
+ unsigned long long sqnum, int deletion, int *used)
+{
+ struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
+ struct replay_entry *r;
+ char *nbuf;
+
+ if (key_inum(c, key) >= c->highest_inum)
+ c->highest_inum = key_inum(c, key);
+
+ dbg_mnt("add LEB %d:%d, key %s", lnum, offs, DBGKEY(key));
+ while (*p) {
+ parent = *p;
+ r = rb_entry(parent, struct replay_entry, rb);
+ if (sqnum < r->sqnum) {
+ p = &(*p)->rb_left;
+ continue;
+ }
+ if (sqnum > r->sqnum) {
+ p = &(*p)->rb_right;
+ continue;
+ }
+ ubifs_err("duplicate sqnum in replay");
+ return -EINVAL;
+ }
+
+ r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+ nbuf = kmalloc(nlen + 1, GFP_KERNEL);
+ if (!nbuf) {
+ kfree(r);
+ return -ENOMEM;
+ }
+
+ if (!deletion)
+ *used += ALIGN(len, 8);
+ r->lnum = lnum;
+ r->offs = offs;
+ r->len = len;
+ r->sqnum = sqnum;
+ r->nm.len = nlen;
+ memcpy(nbuf, name, nlen);
+ nbuf[nlen] = '\0';
+ r->nm.name = nbuf;
+ r->flags = (deletion ? REPLAY_DELETION : 0);
+ key_copy(c, key, &r->key);
+
+ ubifs_assert(!*p);
+ rb_link_node(&r->rb, parent, p);
+ rb_insert_color(&r->rb, &c->replay_tree);
+ return 0;
+}
+
+/**
+ * ubifs_validate_entry - validate directory or extended attribute entry node.
+ * @c: UBIFS file-system description object
+ * @dent: the node to validate
+ *
+ * This function validates directory or extended attribute entry node @dent.
+ * Returns zero if the node is all right and %-EINVAL if not.
+ */
+int ubifs_validate_entry(struct ubifs_info *c,
+ const struct ubifs_dent_node *dent)
+{
+ int key_type = key_type_flash(c, dent->key);
+ int nlen = le16_to_cpu(dent->nlen);
+
+ if (le32_to_cpu(dent->ch.len) != nlen + UBIFS_DENT_NODE_SZ + 1 ||
+ dent->type >= UBIFS_ITYPES_CNT ||
+ nlen > UBIFS_MAX_NLEN || dent->name[nlen] != 0 ||
+ strnlen((char *)dent->name, nlen) != nlen ||
+ le64_to_cpu(dent->inum) > MAX_INUM) {
+ ubifs_err("bad %s node", key_type == UBIFS_DENT_KEY ?
+ "directory entry" : "extended attribute entry");
+ return -EINVAL;
+ }
+
+ if (key_type != UBIFS_DENT_KEY && key_type != UBIFS_XENT_KEY) {
+ ubifs_err("bad key type %d", key_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * replay_bud - replay a bud logical eraseblock.
+ * @c: UBIFS file-system description object
+ * @lnum: bud logical eraseblock number to replay
+ * @offs: bud start offset
+ * @jhead: journal head to which this bud belongs
+ * @free: amount of free space in the bud is returned here
+ * @dirty: amount of dirty space from padding and deletion nodes is returned
+ * here
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
+ int *free, int *dirty)
+{
+ int err = 0, used = 0;
+ struct ubifs_scan_leb *sleb;
+ struct ubifs_scan_node *snod;
+ struct ubifs_bud *bud;
+
+ dbg_mnt("replay bud LEB %d, head %d", lnum, jhead);
+ if (c->need_recovery)
+ sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, jhead != GCHD);
+ else
+ sleb = ubifs_scan(c, lnum, offs, c->sbuf);
+ if (IS_ERR(sleb))
+ return PTR_ERR(sleb);
+
+ /*
+ * The bud does not have to start from offset zero - the beginning of
+ * the 'lnum' LEB may contain previously committed data. One of the
+ * things we have to do in replay is to correctly update lprops with
+ * newer information about this LEB.
+ *
+ * At this point lprops thinks that this LEB has 'c->leb_size - offs'
+	 * bytes of free space because it only contains information about
+ * committed data.
+ *
+	 * But we know that the real amount of free space is 'c->leb_size -
+ * sleb->endpt', and the space in the 'lnum' LEB between 'offs' and
+ * 'sleb->endpt' is used by bud data. We have to correctly calculate
+	 * how much of that data is dirty and update lprops with this
+ * information.
+ *
+	 * The dirt in that LEB region consists of padding nodes, deletion
+ * nodes, truncation nodes and nodes which are obsoleted by subsequent
+ * nodes in this LEB. So instead of calculating clean space, we
+ * calculate used space ('used' variable).
+ */
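+	/*
+	 * For illustration only (hypothetical numbers): if c->leb_size is
+	 * 131072, the bud starts at offs 4096, scanning stops at sleb->endpt
+	 * 65536 and the nodes which are still live add up to used = 40960,
+	 * then *dirty below becomes 65536 - 4096 - 40960 = 20480 and *free
+	 * becomes 131072 - 65536 = 65536.
+	 */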
+
+ list_for_each_entry(snod, &sleb->nodes, list) {
+ int deletion = 0;
+
+ cond_resched();
+
+ if (snod->sqnum >= SQNUM_WATERMARK) {
+ ubifs_err("file system's life ended");
+ goto out_dump;
+ }
+
+ if (snod->sqnum > c->max_sqnum)
+ c->max_sqnum = snod->sqnum;
+
+ switch (snod->type) {
+ case UBIFS_INO_NODE:
+ {
+ struct ubifs_ino_node *ino = snod->node;
+ loff_t new_size = le64_to_cpu(ino->size);
+
+ if (le32_to_cpu(ino->nlink) == 0)
+ deletion = 1;
+ err = insert_node(c, lnum, snod->offs, snod->len,
+ &snod->key, snod->sqnum, deletion,
+ &used, 0, new_size);
+ break;
+ }
+ case UBIFS_DATA_NODE:
+ {
+ struct ubifs_data_node *dn = snod->node;
+ loff_t new_size = le32_to_cpu(dn->size) +
+ key_block(c, &snod->key) *
+ UBIFS_BLOCK_SIZE;
+
+ err = insert_node(c, lnum, snod->offs, snod->len,
+ &snod->key, snod->sqnum, deletion,
+ &used, 0, new_size);
+ break;
+ }
+ case UBIFS_DENT_NODE:
+ case UBIFS_XENT_NODE:
+ {
+ struct ubifs_dent_node *dent = snod->node;
+
+ err = ubifs_validate_entry(c, dent);
+ if (err)
+ goto out_dump;
+
+ err = insert_dent(c, lnum, snod->offs, snod->len,
+ &snod->key, (char *)dent->name,
+ le16_to_cpu(dent->nlen), snod->sqnum,
+ !le64_to_cpu(dent->inum), &used);
+ break;
+ }
+ case UBIFS_TRUN_NODE:
+ {
+ struct ubifs_trun_node *trun = snod->node;
+ loff_t old_size = le64_to_cpu(trun->old_size);
+ loff_t new_size = le64_to_cpu(trun->new_size);
+ union ubifs_key key;
+
+ /* Validate truncation node */
+ if (old_size < 0 || old_size > c->max_inode_sz ||
+ new_size < 0 || new_size > c->max_inode_sz ||
+ old_size <= new_size) {
+ ubifs_err("bad truncation node");
+ goto out_dump;
+ }
+
+ /*
+ * Create a fake truncation key just to use the same
+ * functions which expect nodes to have keys.
+ */
+ trun_key_init(c, &key, le32_to_cpu(trun->inum));
+ err = insert_node(c, lnum, snod->offs, snod->len,
+ &key, snod->sqnum, 1, &used,
+ old_size, new_size);
+ break;
+ }
+ default:
+ ubifs_err("unexpected node type %d in bud LEB %d:%d",
+ snod->type, lnum, snod->offs);
+ err = -EINVAL;
+ goto out_dump;
+ }
+ if (err)
+ goto out;
+ }
+
+ bud = ubifs_search_bud(c, lnum);
+ if (!bud)
+ BUG();
+
+ ubifs_assert(sleb->endpt - offs >= used);
+ ubifs_assert(sleb->endpt % c->min_io_size == 0);
+
+ *dirty = sleb->endpt - offs - used;
+ *free = c->leb_size - sleb->endpt;
+
+out:
+ ubifs_scan_destroy(sleb);
+ return err;
+
+out_dump:
+ ubifs_err("bad node is at LEB %d:%d", lnum, snod->offs);
+ dbg_dump_node(c, snod->node);
+ ubifs_scan_destroy(sleb);
+ return -EINVAL;
+}
+
+/**
+ * insert_ref_node - insert a reference node to the replay tree.
+ * @c: UBIFS file-system description object
+ * @lnum: node logical eraseblock number
+ * @offs: node offset
+ * @sqnum: sequence number
+ * @free: amount of free space in bud
+ * @dirty: amount of dirty space from padding and deletion nodes
+ *
+ * This function inserts a reference node to the replay tree and returns zero
+ * in case of success or a negative error code in case of failure.
+ */
+static int insert_ref_node(struct ubifs_info *c, int lnum, int offs,
+ unsigned long long sqnum, int free, int dirty)
+{
+ struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
+ struct replay_entry *r;
+
+ dbg_mnt("add ref LEB %d:%d", lnum, offs);
+ while (*p) {
+ parent = *p;
+ r = rb_entry(parent, struct replay_entry, rb);
+ if (sqnum < r->sqnum) {
+ p = &(*p)->rb_left;
+ continue;
+ } else if (sqnum > r->sqnum) {
+ p = &(*p)->rb_right;
+ continue;
+ }
+ ubifs_err("duplicate sqnum in replay tree");
+ return -EINVAL;
+ }
+
+ r = kzalloc(sizeof(struct replay_entry), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+
+ r->lnum = lnum;
+ r->offs = offs;
+ r->sqnum = sqnum;
+ r->flags = REPLAY_REF;
+ r->free = free;
+ r->dirty = dirty;
+
+ rb_link_node(&r->rb, parent, p);
+ rb_insert_color(&r->rb, &c->replay_tree);
+ return 0;
+}
+
+/**
+ * replay_buds - replay all buds.
+ * @c: UBIFS file-system description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int replay_buds(struct ubifs_info *c)
+{
+ struct bud_entry *b;
+ int err, uninitialized_var(free), uninitialized_var(dirty);
+
+ list_for_each_entry(b, &c->replay_buds, list) {
+ err = replay_bud(c, b->bud->lnum, b->bud->start, b->bud->jhead,
+ &free, &dirty);
+ if (err)
+ return err;
+ err = insert_ref_node(c, b->bud->lnum, b->bud->start, b->sqnum,
+ free, dirty);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * destroy_bud_list - destroy the list of buds to replay.
+ * @c: UBIFS file-system description object
+ */
+static void destroy_bud_list(struct ubifs_info *c)
+{
+ struct bud_entry *b;
+
+ while (!list_empty(&c->replay_buds)) {
+ b = list_entry(c->replay_buds.next, struct bud_entry, list);
+ list_del(&b->list);
+ kfree(b);
+ }
+}
+
+/**
+ * add_replay_bud - add a bud to the list of buds to replay.
+ * @c: UBIFS file-system description object
+ * @lnum: bud logical eraseblock number to replay
+ * @offs: bud start offset
+ * @jhead: journal head to which this bud belongs
+ * @sqnum: reference node sequence number
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
+ unsigned long long sqnum)
+{
+ struct ubifs_bud *bud;
+ struct bud_entry *b;
+
+ dbg_mnt("add replay bud LEB %d:%d, head %d", lnum, offs, jhead);
+
+ bud = kmalloc(sizeof(struct ubifs_bud), GFP_KERNEL);
+ if (!bud)
+ return -ENOMEM;
+
+ b = kmalloc(sizeof(struct bud_entry), GFP_KERNEL);
+ if (!b) {
+ kfree(bud);
+ return -ENOMEM;
+ }
+
+ bud->lnum = lnum;
+ bud->start = offs;
+ bud->jhead = jhead;
+ ubifs_add_bud(c, bud);
+
+ b->bud = bud;
+ b->sqnum = sqnum;
+ list_add_tail(&b->list, &c->replay_buds);
+
+ return 0;
+}
+
+/**
+ * validate_ref - validate a reference node.
+ * @c: UBIFS file-system description object
+ * @ref: the reference node to validate
+ *
+ * This function returns %1 if a bud reference already exists for the LEB,
+ * %0 if the reference node is new, and %-EINVAL if validation fails.
+ */
+static int validate_ref(struct ubifs_info *c, const struct ubifs_ref_node *ref)
+{
+ struct ubifs_bud *bud;
+ int lnum = le32_to_cpu(ref->lnum);
+ unsigned int offs = le32_to_cpu(ref->offs);
+ unsigned int jhead = le32_to_cpu(ref->jhead);
+
+ /*
+	 * ref->offs may point to the end of the LEB when the journal head
+	 * points to the end of the LEB and we write a reference node for it
+	 * during commit. This is why the check below rejects
+	 * 'offs > c->leb_size' rather than 'offs >= c->leb_size'.
+ */
+ if (jhead >= c->jhead_cnt || lnum >= c->leb_cnt ||
+ lnum < c->main_first || offs > c->leb_size ||
+ offs & (c->min_io_size - 1))
+ return -EINVAL;
+
+ /* Make sure we have not already looked at this bud */
+ bud = ubifs_search_bud(c, lnum);
+ if (bud) {
+ if (bud->jhead == jhead && bud->start <= offs)
+ return 1;
+ ubifs_err("bud at LEB %d:%d was already referred", lnum, offs);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * replay_log_leb - replay a log logical eraseblock.
+ * @c: UBIFS file-system description object
+ * @lnum: log logical eraseblock to replay
+ * @offs: offset to start replaying from
+ * @sbuf: scan buffer
+ *
+ * This function replays a log LEB and returns zero in case of success, %1 if
+ * this is the last LEB in the log, and a negative error code in case of
+ * failure.
+ */
+static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
+{
+ int err;
+ struct ubifs_scan_leb *sleb;
+ struct ubifs_scan_node *snod;
+ const struct ubifs_cs_node *node;
+
+ dbg_mnt("replay log LEB %d:%d", lnum, offs);
+ sleb = ubifs_scan(c, lnum, offs, sbuf);
+ if (IS_ERR(sleb)) {
+ if (c->need_recovery)
+ sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
+ if (IS_ERR(sleb))
+ return PTR_ERR(sleb);
+ }
+
+ if (sleb->nodes_cnt == 0) {
+ err = 1;
+ goto out;
+ }
+
+ node = sleb->buf;
+
+ snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);
+ if (c->cs_sqnum == 0) {
+ /*
+ * This is the first log LEB we are looking at, make sure that
+ * the first node is a commit start node. Also record its
+ * sequence number so that UBIFS can determine where the log
+		 * ends, because all nodes of the current log have higher
+		 * sequence numbers.
+ */
+ if (snod->type != UBIFS_CS_NODE) {
+ dbg_err("first log node at LEB %d:%d is not CS node",
+ lnum, offs);
+ goto out_dump;
+ }
+ if (le64_to_cpu(node->cmt_no) != c->cmt_no) {
+ dbg_err("first CS node at LEB %d:%d has wrong "
+ "commit number %llu expected %llu",
+ lnum, offs,
+ (unsigned long long)le64_to_cpu(node->cmt_no),
+ c->cmt_no);
+ goto out_dump;
+ }
+
+ c->cs_sqnum = le64_to_cpu(node->ch.sqnum);
+ dbg_mnt("commit start sqnum %llu", c->cs_sqnum);
+ }
+
+ if (snod->sqnum < c->cs_sqnum) {
+ /*
+		 * This means that we have reached the end of the log and are
+		 * now looking at older log data, which was already committed
+		 * but whose eraseblock was not erased (UBIFS only un-maps it).
+		 * So we have to exit with the "end of log" code.
+ */
+ err = 1;
+ goto out;
+ }
+
+ /* Make sure the first node sits at offset zero of the LEB */
+ if (snod->offs != 0) {
+ dbg_err("first node is not at zero offset");
+ goto out_dump;
+ }
+
+ list_for_each_entry(snod, &sleb->nodes, list) {
+
+ cond_resched();
+
+ if (snod->sqnum >= SQNUM_WATERMARK) {
+ ubifs_err("file system's life ended");
+ goto out_dump;
+ }
+
+ if (snod->sqnum < c->cs_sqnum) {
+ dbg_err("bad sqnum %llu, commit sqnum %llu",
+ snod->sqnum, c->cs_sqnum);
+ goto out_dump;
+ }
+
+ if (snod->sqnum > c->max_sqnum)
+ c->max_sqnum = snod->sqnum;
+
+ switch (snod->type) {
+ case UBIFS_REF_NODE: {
+ const struct ubifs_ref_node *ref = snod->node;
+
+ err = validate_ref(c, ref);
+ if (err == 1)
+ break; /* Already have this bud */
+ if (err)
+ goto out_dump;
+
+ err = add_replay_bud(c, le32_to_cpu(ref->lnum),
+ le32_to_cpu(ref->offs),
+ le32_to_cpu(ref->jhead),
+ snod->sqnum);
+ if (err)
+ goto out;
+
+ break;
+ }
+ case UBIFS_CS_NODE:
+ /* Make sure it sits at the beginning of LEB */
+ if (snod->offs != 0) {
+ ubifs_err("unexpected node in log");
+ goto out_dump;
+ }
+ break;
+ default:
+ ubifs_err("unexpected node in log");
+ goto out_dump;
+ }
+ }
+
+ if (sleb->endpt || c->lhead_offs >= c->leb_size) {
+ c->lhead_lnum = lnum;
+ c->lhead_offs = sleb->endpt;
+ }
+
+ err = !sleb->endpt;
+out:
+ ubifs_scan_destroy(sleb);
+ return err;
+
+out_dump:
+	ubifs_err("log error detected while replaying the log at LEB %d:%d",
+ lnum, offs + snod->offs);
+ dbg_dump_node(c, snod->node);
+ ubifs_scan_destroy(sleb);
+ return -EINVAL;
+}
+
+/**
+ * take_ihead - update the status of the index head in lprops to 'taken'.
+ * @c: UBIFS file-system description object
+ *
+ * This function returns the amount of free space in the index head LEB or a
+ * negative error code.
+ */
+static int take_ihead(struct ubifs_info *c)
+{
+ const struct ubifs_lprops *lp;
+ int err, free;
+
+ ubifs_get_lprops(c);
+
+ lp = ubifs_lpt_lookup_dirty(c, c->ihead_lnum);
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out;
+ }
+
+ free = lp->free;
+
+ lp = ubifs_change_lp(c, lp, LPROPS_NC, LPROPS_NC,
+ lp->flags | LPROPS_TAKEN, 0);
+ if (IS_ERR(lp)) {
+ err = PTR_ERR(lp);
+ goto out;
+ }
+
+ err = free;
+out:
+ ubifs_release_lprops(c);
+ return err;
+}
+
+/**
+ * ubifs_replay_journal - replay journal.
+ * @c: UBIFS file-system description object
+ *
+ * This function scans the journal, replays and cleans it up. It makes sure all
+ * memory data structures related to the uncommitted journal are built (dirty
+ * TNC tree, tree of buds, modified lprops, etc). Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubifs_replay_journal(struct ubifs_info *c)
+{
+ int err, i, lnum, offs, _free;
+ void *sbuf = NULL;
+
+ BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);
+
+ /* Update the status of the index head in lprops to 'taken' */
+ _free = take_ihead(c);
+ if (_free < 0)
+ return _free; /* Error code */
+
+ if (c->ihead_offs != c->leb_size - _free) {
+ ubifs_err("bad index head LEB %d:%d", c->ihead_lnum,
+ c->ihead_offs);
+ return -EINVAL;
+ }
+
+ sbuf = vmalloc(c->leb_size);
+ if (!sbuf)
+ return -ENOMEM;
+
+ dbg_mnt("start replaying the journal");
+
+ c->replaying = 1;
+
+ lnum = c->ltail_lnum = c->lhead_lnum;
+ offs = c->lhead_offs;
+
+ for (i = 0; i < c->log_lebs; i++, lnum++) {
+ if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) {
+ /*
+ * The log is logically circular, we reached the last
+ * LEB, switch to the first one.
+ */
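+			/*
+			 * For example (hypothetical layout): if the log
+			 * occupies 4 LEBs starting at UBIFS_LOG_LNUM,
+			 * reaching UBIFS_LOG_LNUM + 4 wraps the scan back
+			 * to UBIFS_LOG_LNUM at offset 0.
+			 */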
+ lnum = UBIFS_LOG_LNUM;
+ offs = 0;
+ }
+ err = replay_log_leb(c, lnum, offs, sbuf);
+ if (err == 1)
+ /* We hit the end of the log */
+ break;
+ if (err)
+ goto out;
+ offs = 0;
+ }
+
+ err = replay_buds(c);
+ if (err)
+ goto out;
+
+ err = apply_replay_tree(c);
+ if (err)
+ goto out;
+
+ ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery);
+ dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, "
+ "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum,
+ (unsigned long)c->highest_inum);
+out:
+ destroy_replay_tree(c);
+ destroy_bud_list(c);
+ vfree(sbuf);
+ c->replaying = 0;
+ return err;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/sb.c b/qemu/roms/u-boot/fs/ubifs/sb.c
new file mode 100644
index 000000000..00c9cd31a
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/sb.c
@@ -0,0 +1,346 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This file implements UBIFS superblock. The superblock is stored at the first
+ * LEB of the volume and is never changed by UBIFS. Only user-space tools may
+ * change it. The superblock node mostly contains geometry information.
+ */
+
+#include "ubifs.h"
+
+/*
+ * Default journal size in logical eraseblocks as a percent of total
+ * flash size.
+ */
+#define DEFAULT_JNL_PERCENT 5
+
+/* Default maximum journal size in bytes */
+#define DEFAULT_MAX_JNL (32*1024*1024)
+
+/* Default indexing tree fanout */
+#define DEFAULT_FANOUT 8
+
+/* Default number of data journal heads */
+#define DEFAULT_JHEADS_CNT 1
+
+/* Default positions of different LEBs in the main area */
+#define DEFAULT_IDX_LEB 0
+#define DEFAULT_DATA_LEB 1
+#define DEFAULT_GC_LEB 2
+
+/* Default number of LEB numbers in LPT's save table */
+#define DEFAULT_LSAVE_CNT 256
+
+/* Default reserved pool size as a percent of maximum free space */
+#define DEFAULT_RP_PERCENT 5
+
+/* The default maximum size of reserved pool in bytes */
+#define DEFAULT_MAX_RP_SIZE (5*1024*1024)
+
+/* Default time granularity in nanoseconds */
+#define DEFAULT_TIME_GRAN 1000000000
+
+/**
+ * validate_sb - validate superblock node.
+ * @c: UBIFS file-system description object
+ * @sup: superblock node
+ *
+ * This function validates superblock node @sup. Since most of the data was read
+ * from the superblock and stored in @c, the function validates fields in @c
+ * instead. Returns zero in case of success and %-EINVAL in case of validation
+ * failure.
+ */
+static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
+{
+ long long max_bytes;
+ int err = 1, min_leb_cnt;
+
+ if (!c->key_hash) {
+ err = 2;
+ goto failed;
+ }
+
+ if (sup->key_fmt != UBIFS_SIMPLE_KEY_FMT) {
+ err = 3;
+ goto failed;
+ }
+
+ if (le32_to_cpu(sup->min_io_size) != c->min_io_size) {
+ ubifs_err("min. I/O unit mismatch: %d in superblock, %d real",
+ le32_to_cpu(sup->min_io_size), c->min_io_size);
+ goto failed;
+ }
+
+ if (le32_to_cpu(sup->leb_size) != c->leb_size) {
+ ubifs_err("LEB size mismatch: %d in superblock, %d real",
+ le32_to_cpu(sup->leb_size), c->leb_size);
+ goto failed;
+ }
+
+ if (c->log_lebs < UBIFS_MIN_LOG_LEBS ||
+ c->lpt_lebs < UBIFS_MIN_LPT_LEBS ||
+ c->orph_lebs < UBIFS_MIN_ORPH_LEBS ||
+ c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
+ err = 4;
+ goto failed;
+ }
+
+ /*
+ * Calculate minimum allowed amount of main area LEBs. This is very
+	 * similar to %UBIFS_MIN_LEB_CNT, but we take into account the real
+	 * values we have just read from the superblock.
+ */
+ min_leb_cnt = UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs;
+ min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;
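+	/*
+	 * For example (hypothetical superblock values): with log_lebs = 5,
+	 * lpt_lebs = 2, orph_lebs = 1 and jhead_cnt = 3 this evaluates to
+	 * UBIFS_SB_LEBS + UBIFS_MST_LEBS + 5 + 2 + 1 + 3 + 6 LEBs; a volume
+	 * with fewer LEBs than that is rejected below.
+	 */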
+
+ if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
+ ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, "
+ "%d minimum required", c->leb_cnt, c->vi.size,
+ min_leb_cnt);
+ goto failed;
+ }
+
+ if (c->max_leb_cnt < c->leb_cnt) {
+ ubifs_err("max. LEB count %d less than LEB count %d",
+ c->max_leb_cnt, c->leb_cnt);
+ goto failed;
+ }
+
+ if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
+ err = 7;
+ goto failed;
+ }
+
+ if (c->max_bud_bytes < (long long)c->leb_size * UBIFS_MIN_BUD_LEBS ||
+ c->max_bud_bytes > (long long)c->leb_size * c->main_lebs) {
+ err = 8;
+ goto failed;
+ }
+
+ if (c->jhead_cnt < NONDATA_JHEADS_CNT + 1 ||
+ c->jhead_cnt > NONDATA_JHEADS_CNT + UBIFS_MAX_JHEADS) {
+ err = 9;
+ goto failed;
+ }
+
+ if (c->fanout < UBIFS_MIN_FANOUT ||
+ ubifs_idx_node_sz(c, c->fanout) > c->leb_size) {
+ err = 10;
+ goto failed;
+ }
+
+ if (c->lsave_cnt < 0 || (c->lsave_cnt > DEFAULT_LSAVE_CNT &&
+ c->lsave_cnt > c->max_leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS -
+ c->log_lebs - c->lpt_lebs - c->orph_lebs)) {
+ err = 11;
+ goto failed;
+ }
+
+ if (UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs + c->lpt_lebs +
+ c->orph_lebs + c->main_lebs != c->leb_cnt) {
+ err = 12;
+ goto failed;
+ }
+
+ if (c->default_compr < 0 || c->default_compr >= UBIFS_COMPR_TYPES_CNT) {
+ err = 13;
+ goto failed;
+ }
+
+ max_bytes = c->main_lebs * (long long)c->leb_size;
+ if (c->rp_size < 0 || max_bytes < c->rp_size) {
+ err = 14;
+ goto failed;
+ }
+
+ if (le32_to_cpu(sup->time_gran) > 1000000000 ||
+ le32_to_cpu(sup->time_gran) < 1) {
+ err = 15;
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ ubifs_err("bad superblock, error %d", err);
+ dbg_dump_node(c, sup);
+ return -EINVAL;
+}
+
+/**
+ * ubifs_read_sb_node - read superblock node.
+ * @c: UBIFS file-system description object
+ *
+ * This function returns a pointer to the superblock node or a negative error
+ * code.
+ */
+struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
+{
+ struct ubifs_sb_node *sup;
+ int err;
+
+ sup = kmalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_NOFS);
+ if (!sup)
+ return ERR_PTR(-ENOMEM);
+
+ err = ubifs_read_node(c, sup, UBIFS_SB_NODE, UBIFS_SB_NODE_SZ,
+ UBIFS_SB_LNUM, 0);
+ if (err) {
+ kfree(sup);
+ return ERR_PTR(err);
+ }
+
+ return sup;
+}
+
+/**
+ * ubifs_read_superblock - read superblock.
+ * @c: UBIFS file-system description object
+ *
+ * This function finds, reads and checks the superblock. If the UBI volume is
+ * empty, there is no file system to mount, so an error is returned. Returns
+ * zero in case of success, and a negative error code in case of failure.
+ */
+int ubifs_read_superblock(struct ubifs_info *c)
+{
+ int err, sup_flags;
+ struct ubifs_sb_node *sup;
+
+ if (c->empty) {
+ printf("No UBIFS filesystem found!\n");
+ return -1;
+ }
+
+ sup = ubifs_read_sb_node(c);
+ if (IS_ERR(sup))
+ return PTR_ERR(sup);
+
+ c->fmt_version = le32_to_cpu(sup->fmt_version);
+ c->ro_compat_version = le32_to_cpu(sup->ro_compat_version);
+
+ /*
+ * The software supports all previous versions but not future versions,
+ * due to the unavailability of time-travelling equipment.
+ */
+ if (c->fmt_version > UBIFS_FORMAT_VERSION) {
+ struct super_block *sb = c->vfs_sb;
+ int mounting_ro = sb->s_flags & MS_RDONLY;
+
+ ubifs_assert(!c->ro_media || mounting_ro);
+ if (!mounting_ro ||
+ c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
+ ubifs_err("on-flash format version is w%d/r%d, but "
+ "software only supports up to version "
+ "w%d/r%d", c->fmt_version,
+ c->ro_compat_version, UBIFS_FORMAT_VERSION,
+ UBIFS_RO_COMPAT_VERSION);
+ if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
+ ubifs_msg("only R/O mounting is possible");
+ err = -EROFS;
+ } else
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * The FS is mounted R/O, and the media format is
+ * R/O-compatible with the UBIFS implementation, so we can
+ * mount.
+ */
+ c->rw_incompat = 1;
+ }
+
+ if (c->fmt_version < 3) {
+ ubifs_err("on-flash format version %d is not supported",
+ c->fmt_version);
+ err = -EINVAL;
+ goto out;
+ }
+
+ switch (sup->key_hash) {
+ case UBIFS_KEY_HASH_R5:
+ c->key_hash = key_r5_hash;
+ c->key_hash_type = UBIFS_KEY_HASH_R5;
+ break;
+
+ case UBIFS_KEY_HASH_TEST:
+ c->key_hash = key_test_hash;
+ c->key_hash_type = UBIFS_KEY_HASH_TEST;
+ break;
+	}
+
+ c->key_fmt = sup->key_fmt;
+
+ switch (c->key_fmt) {
+ case UBIFS_SIMPLE_KEY_FMT:
+ c->key_len = UBIFS_SK_LEN;
+ break;
+ default:
+ ubifs_err("unsupported key format");
+ err = -EINVAL;
+ goto out;
+ }
+
+ c->leb_cnt = le32_to_cpu(sup->leb_cnt);
+ c->max_leb_cnt = le32_to_cpu(sup->max_leb_cnt);
+ c->max_bud_bytes = le64_to_cpu(sup->max_bud_bytes);
+ c->log_lebs = le32_to_cpu(sup->log_lebs);
+ c->lpt_lebs = le32_to_cpu(sup->lpt_lebs);
+ c->orph_lebs = le32_to_cpu(sup->orph_lebs);
+ c->jhead_cnt = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT;
+ c->fanout = le32_to_cpu(sup->fanout);
+ c->lsave_cnt = le32_to_cpu(sup->lsave_cnt);
+ c->default_compr = le16_to_cpu(sup->default_compr);
+ c->rp_size = le64_to_cpu(sup->rp_size);
+ c->rp_uid = le32_to_cpu(sup->rp_uid);
+ c->rp_gid = le32_to_cpu(sup->rp_gid);
+ sup_flags = le32_to_cpu(sup->flags);
+
+ c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran);
+ memcpy(&c->uuid, &sup->uuid, 16);
+ c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT);
+
+ /* Automatically increase file system size to the maximum size */
+ c->old_leb_cnt = c->leb_cnt;
+ if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) {
+ c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size);
+ dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs",
+ c->old_leb_cnt, c->leb_cnt);
+ }
+
+ c->log_bytes = (long long)c->log_lebs * c->leb_size;
+ c->log_last = UBIFS_LOG_LNUM + c->log_lebs - 1;
+ c->lpt_first = UBIFS_LOG_LNUM + c->log_lebs;
+ c->lpt_last = c->lpt_first + c->lpt_lebs - 1;
+ c->orph_first = c->lpt_last + 1;
+ c->orph_last = c->orph_first + c->orph_lebs - 1;
+ c->main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS;
+ c->main_lebs -= c->log_lebs + c->lpt_lebs + c->orph_lebs;
+ c->main_first = c->leb_cnt - c->main_lebs;
+ c->report_rp_size = ubifs_reported_space(c, c->rp_size);
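+	/*
+	 * Illustration of the layout computed above (hypothetical counts):
+	 * with log_lebs = 5, lpt_lebs = 2 and orph_lebs = 1 the log occupies
+	 * LEBs UBIFS_LOG_LNUM .. UBIFS_LOG_LNUM + 4, the LPT the next two
+	 * LEBs, the orphan area the LEB after that, and the main area runs
+	 * from main_first up to leb_cnt - 1.
+	 */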
+
+ err = validate_sb(c, sup);
+out:
+ kfree(sup);
+ return err;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/scan.c b/qemu/roms/u-boot/fs/ubifs/scan.c
new file mode 100644
index 000000000..0ed82479b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/scan.c
@@ -0,0 +1,362 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements the scan which is a general-purpose function for
+ * determining what nodes are in an eraseblock. The scan is used to replay the
+ * journal, to do garbage collection, for the TNC in-the-gaps method, and by
+ * debugging functions.
+ */
+
+#include "ubifs.h"
+
+/**
+ * scan_padding_bytes - scan for padding bytes.
+ * @buf: buffer to scan
+ * @len: length of buffer
+ *
+ * This function returns the number of padding bytes on success and
+ * %SCANNED_GARBAGE on failure.
+ */
+static int scan_padding_bytes(void *buf, int len)
+{
+ int pad_len = 0, max_pad_len = min_t(int, UBIFS_PAD_NODE_SZ, len);
+ uint8_t *p = buf;
+
+ dbg_scan("not a node");
+
+ while (pad_len < max_pad_len && *p++ == UBIFS_PADDING_BYTE)
+ pad_len += 1;
+
+ if (!pad_len || (pad_len & 7))
+ return SCANNED_GARBAGE;
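+	/*
+	 * Example: a run of 24 padding bytes is accepted here, whereas a run
+	 * of, say, 20 bytes is reported as garbage, because everything UBIFS
+	 * writes is padded out to an 8-byte boundary.
+	 */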
+
+ dbg_scan("%d padding bytes", pad_len);
+
+ return pad_len;
+}
+
+/**
+ * ubifs_scan_a_node - scan for a node or padding.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to scan
+ * @len: length of buffer
+ * @lnum: logical eraseblock number
+ * @offs: offset within the logical eraseblock
+ * @quiet: print no messages
+ *
+ * This function returns a scanning code to indicate what was scanned.
+ */
+int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
+ int offs, int quiet)
+{
+ struct ubifs_ch *ch = buf;
+ uint32_t magic;
+
+ magic = le32_to_cpu(ch->magic);
+
+ if (magic == 0xFFFFFFFF) {
+ dbg_scan("hit empty space");
+ return SCANNED_EMPTY_SPACE;
+ }
+
+ if (magic != UBIFS_NODE_MAGIC)
+ return scan_padding_bytes(buf, len);
+
+ if (len < UBIFS_CH_SZ)
+ return SCANNED_GARBAGE;
+
+ dbg_scan("scanning %s", dbg_ntype(ch->node_type));
+
+ if (ubifs_check_node(c, buf, lnum, offs, quiet, 1))
+ return SCANNED_A_CORRUPT_NODE;
+
+ if (ch->node_type == UBIFS_PAD_NODE) {
+ struct ubifs_pad_node *pad = buf;
+ int pad_len = le32_to_cpu(pad->pad_len);
+ int node_len = le32_to_cpu(ch->len);
+
+ /* Validate the padding node */
+ if (pad_len < 0 ||
+ offs + node_len + pad_len > c->leb_size) {
+ if (!quiet) {
+ ubifs_err("bad pad node at LEB %d:%d",
+ lnum, offs);
+ dbg_dump_node(c, pad);
+ }
+ return SCANNED_A_BAD_PAD_NODE;
+ }
+
+		/* Make sure the node pads to an 8-byte boundary */
+ if ((node_len + pad_len) & 7) {
+ if (!quiet) {
+ dbg_err("bad padding length %d - %d",
+ offs, offs + node_len + pad_len);
+ }
+ return SCANNED_A_BAD_PAD_NODE;
+ }
+
+ dbg_scan("%d bytes padded, offset now %d",
+ pad_len, ALIGN(offs + node_len + pad_len, 8));
+
+ return node_len + pad_len;
+ }
+
+ return SCANNED_A_NODE;
+}
+
+/**
+ * ubifs_start_scan - create LEB scanning information at start of scan.
+ * @c: UBIFS file-system description object
+ * @lnum: logical eraseblock number
+ * @offs: offset to start at (usually zero)
+ * @sbuf: scan buffer (must be c->leb_size)
+ *
+ * This function returns the scan information on success and an error pointer
+ * on failure.
+ */
+struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
+ int offs, void *sbuf)
+{
+ struct ubifs_scan_leb *sleb;
+ int err;
+
+ dbg_scan("scan LEB %d:%d", lnum, offs);
+
+ sleb = kzalloc(sizeof(struct ubifs_scan_leb), GFP_NOFS);
+ if (!sleb)
+ return ERR_PTR(-ENOMEM);
+
+ sleb->lnum = lnum;
+ INIT_LIST_HEAD(&sleb->nodes);
+ sleb->buf = sbuf;
+
+ err = ubi_read(c->ubi, lnum, sbuf + offs, offs, c->leb_size - offs);
+ if (err && err != -EBADMSG) {
+ ubifs_err("cannot read %d bytes from LEB %d:%d,"
+ " error %d", c->leb_size - offs, lnum, offs, err);
+ kfree(sleb);
+ return ERR_PTR(err);
+ }
+
+ if (err == -EBADMSG)
+ sleb->ecc = 1;
+
+ return sleb;
+}
+
+/**
+ * ubifs_end_scan - update LEB scanning information at end of scan.
+ * @c: UBIFS file-system description object
+ * @sleb: scanning information
+ * @lnum: logical eraseblock number
+ * @offs: offset at which the scan ended
+ *
+ * This function records the end of the scanned region in @sleb->endpt.
+ */
+void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ int lnum, int offs)
+{
+ lnum = lnum;
+ dbg_scan("stop scanning LEB %d at offset %d", lnum, offs);
+ ubifs_assert(offs % c->min_io_size == 0);
+
+ sleb->endpt = ALIGN(offs, c->min_io_size);
+}
+
+/**
+ * ubifs_add_snod - add a scanned node to LEB scanning information.
+ * @c: UBIFS file-system description object
+ * @sleb: scanning information
+ * @buf: buffer containing node
+ * @offs: offset of node on flash
+ *
+ * This function returns %0 on success and a negative error code on failure.
+ */
+int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ void *buf, int offs)
+{
+ struct ubifs_ch *ch = buf;
+ struct ubifs_ino_node *ino = buf;
+ struct ubifs_scan_node *snod;
+
+ snod = kzalloc(sizeof(struct ubifs_scan_node), GFP_NOFS);
+ if (!snod)
+ return -ENOMEM;
+
+ snod->sqnum = le64_to_cpu(ch->sqnum);
+ snod->type = ch->node_type;
+ snod->offs = offs;
+ snod->len = le32_to_cpu(ch->len);
+ snod->node = buf;
+
+ switch (ch->node_type) {
+ case UBIFS_INO_NODE:
+ case UBIFS_DENT_NODE:
+ case UBIFS_XENT_NODE:
+ case UBIFS_DATA_NODE:
+ case UBIFS_TRUN_NODE:
+ /*
+ * The key is in the same place in all keyed
+ * nodes.
+ */
+ key_read(c, &ino->key, &snod->key);
+ break;
+ }
+ list_add_tail(&snod->list, &sleb->nodes);
+ sleb->nodes_cnt += 1;
+ return 0;
+}
+
+/**
+ * ubifs_scanned_corruption - print information after UBIFS scanned corruption.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number of corruption
+ * @offs: offset of corruption
+ * @buf: buffer containing corruption
+ */
+void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
+ void *buf)
+{
+ int len;
+
+ ubifs_err("corrupted data at LEB %d:%d", lnum, offs);
+ if (dbg_failure_mode)
+ return;
+ len = c->leb_size - offs;
+ if (len > 4096)
+ len = 4096;
+ dbg_err("first %d bytes from LEB %d:%d", len, lnum, offs);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 4, buf, len, 1);
+}
+
+/**
+ * ubifs_scan - scan a logical eraseblock.
+ * @c: UBIFS file-system description object
+ * @lnum: logical eraseblock number
+ * @offs: offset to start at (usually zero)
+ * @sbuf: scan buffer (must be c->leb_size)
+ *
+ * This function scans LEB number @lnum and returns complete information about
+ * its contents. Returns the scan information in case of success and an error
+ * pointer in case of failure.
+ */
+struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
+ int offs, void *sbuf)
+{
+ void *buf = sbuf + offs;
+ int err, len = c->leb_size - offs;
+ struct ubifs_scan_leb *sleb;
+
+ sleb = ubifs_start_scan(c, lnum, offs, sbuf);
+ if (IS_ERR(sleb))
+ return sleb;
+
+ while (len >= 8) {
+ struct ubifs_ch *ch = buf;
+ int node_len, ret;
+
+ dbg_scan("look at LEB %d:%d (%d bytes left)",
+ lnum, offs, len);
+
+ cond_resched();
+
+ ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+
+ if (ret > 0) {
+ /* Padding bytes or a valid padding node */
+ offs += ret;
+ buf += ret;
+ len -= ret;
+ continue;
+ }
+
+ if (ret == SCANNED_EMPTY_SPACE)
+ /* Empty space is checked later */
+ break;
+
+ switch (ret) {
+ case SCANNED_GARBAGE:
+ dbg_err("garbage");
+ goto corrupted;
+ case SCANNED_A_NODE:
+ break;
+ case SCANNED_A_CORRUPT_NODE:
+ case SCANNED_A_BAD_PAD_NODE:
+ dbg_err("bad node");
+ goto corrupted;
+ default:
+ dbg_err("unknown");
+ goto corrupted;
+ }
+
+ err = ubifs_add_snod(c, sleb, buf, offs);
+ if (err)
+ goto error;
+
+ node_len = ALIGN(le32_to_cpu(ch->len), 8);
+ offs += node_len;
+ buf += node_len;
+ len -= node_len;
+ }
+
+ if (offs % c->min_io_size)
+ goto corrupted;
+
+ ubifs_end_scan(c, sleb, lnum, offs);
+
+ for (; len > 4; offs += 4, buf = buf + 4, len -= 4)
+ if (*(uint32_t *)buf != 0xffffffff)
+ break;
+ for (; len; offs++, buf++, len--)
+ if (*(uint8_t *)buf != 0xff) {
+ ubifs_err("corrupt empty space at LEB %d:%d",
+ lnum, offs);
+ goto corrupted;
+ }
+
+ return sleb;
+
+corrupted:
+ ubifs_scanned_corruption(c, lnum, offs, buf);
+ err = -EUCLEAN;
+error:
+ ubifs_err("LEB %d scanning failed", lnum);
+ ubifs_scan_destroy(sleb);
+ return ERR_PTR(err);
+}
+
+/**
+ * ubifs_scan_destroy - destroy LEB scanning information.
+ * @sleb: scanning information to free
+ */
+void ubifs_scan_destroy(struct ubifs_scan_leb *sleb)
+{
+ struct ubifs_scan_node *node;
+ struct list_head *head;
+
+ head = &sleb->nodes;
+ while (!list_empty(head)) {
+ node = list_entry(head->next, struct ubifs_scan_node, list);
+ list_del(&node->list);
+ kfree(node);
+ }
+ kfree(sleb);
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/super.c b/qemu/roms/u-boot/fs/ubifs/super.c
new file mode 100644
index 000000000..748ab6792
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/super.c
@@ -0,0 +1,1199 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This file implements UBIFS initialization and VFS superblock operations. Some
+ * initialization stuff which is rather large and complex is placed in
+ * corresponding subsystems, but most of it is here.
+ */
+
+#include "ubifs.h"
+#include <linux/math64.h>
+
+#define INODE_LOCKED_MAX 64
+
+struct super_block *ubifs_sb;
+static struct inode *inodes_locked_down[INODE_LOCKED_MAX];
+
+/* shrinker.c */
+
+/* List of all UBIFS file-system instances */
+struct list_head ubifs_infos;
+
+/* linux/fs/super.c */
+
+static int sb_set(struct super_block *sb, void *data)
+{
+ dev_t *dev = data;
+
+ sb->s_dev = *dev;
+ return 0;
+}
+
+/**
+ * sget - find or create a superblock
+ * @type: filesystem type superblock should belong to
+ * @test: comparison callback
+ * @set: setup callback
+ * @data: argument to each of them
+ */
+struct super_block *sget(struct file_system_type *type,
+ int (*test)(struct super_block *,void *),
+ int (*set)(struct super_block *,void *),
+ void *data)
+{
+ struct super_block *s = NULL;
+ int err;
+
+ s = kzalloc(sizeof(struct super_block), GFP_USER);
+ if (!s) {
+ err = -ENOMEM;
+ return ERR_PTR(err);
+ }
+
+ INIT_LIST_HEAD(&s->s_instances);
+ INIT_LIST_HEAD(&s->s_inodes);
+ s->s_time_gran = 1000000000;
+
+ err = set(s, data);
+ if (err) {
+ return ERR_PTR(err);
+ }
+ s->s_type = type;
+ strncpy(s->s_id, type->name, sizeof(s->s_id));
+ list_add(&s->s_instances, &type->fs_supers);
+ return s;
+}
+
+/**
+ * validate_inode - validate inode.
+ * @c: UBIFS file-system description object
+ * @inode: the inode to validate
+ *
+ * This is a helper function for 'ubifs_iget()' which validates various fields
+ * of a newly built inode to make sure they contain sane values and prevent
+ * possible vulnerabilities. Returns zero if the inode is all right and
+ * a non-zero error code if not.
+ */
+static int validate_inode(struct ubifs_info *c, const struct inode *inode)
+{
+ int err;
+ const struct ubifs_inode *ui = ubifs_inode(inode);
+
+ if (inode->i_size > c->max_inode_sz) {
+ ubifs_err("inode is too large (%lld)",
+ (long long)inode->i_size);
+ return 1;
+ }
+
+ if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) {
+ ubifs_err("unknown compression type %d", ui->compr_type);
+ return 2;
+ }
+
+ if (ui->data_len < 0 || ui->data_len > UBIFS_MAX_INO_DATA)
+ return 4;
+
+ if (!ubifs_compr_present(ui->compr_type)) {
+ ubifs_warn("inode %lu uses '%s' compression, but it was not "
+ "compiled in", inode->i_ino,
+ ubifs_compr_name(ui->compr_type));
+ }
+
+ err = dbg_check_dir_size(c, inode);
+ return err;
+}
+
+struct inode *iget_locked(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+
+ inode = (struct inode *)malloc(sizeof(struct ubifs_inode));
+ if (inode) {
+ inode->i_ino = ino;
+ inode->i_sb = sb;
+ list_add(&inode->i_sb_list, &sb->s_inodes);
+ inode->i_state = I_LOCK | I_NEW;
+ }
+
+ return inode;
+}
+
+int ubifs_iput(struct inode *inode)
+{
+ list_del_init(&inode->i_sb_list);
+
+ free(inode);
+ return 0;
+}
+
+/*
+ * Lock (save) inode in inode array for readback after recovery
+ */
+void iput(struct inode *inode)
+{
+ int i;
+ struct inode *ino;
+
+ /*
+ * Search end of list
+ */
+ for (i = 0; i < INODE_LOCKED_MAX; i++) {
+ if (inodes_locked_down[i] == NULL)
+ break;
+ }
+
+ if (i >= INODE_LOCKED_MAX) {
+		ubifs_err("Error, can't lock (save) more inodes during recovery!");
+ return;
+ }
+
+ /*
+ * Allocate and use new inode
+ */
+ ino = (struct inode *)malloc(sizeof(struct ubifs_inode));
+ memcpy(ino, inode, sizeof(struct ubifs_inode));
+
+ /*
+ * Finally save inode in array
+ */
+ inodes_locked_down[i] = ino;
+}
+
+struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
+{
+ int err;
+ union ubifs_key key;
+ struct ubifs_ino_node *ino;
+ struct ubifs_info *c = sb->s_fs_info;
+ struct inode *inode;
+ struct ubifs_inode *ui;
+ int i;
+
+ dbg_gen("inode %lu", inum);
+
+ /*
+ * U-Boot special handling of locked down inodes via recovery
+ * e.g. ubifs_recover_size()
+ */
+ for (i = 0; i < INODE_LOCKED_MAX; i++) {
+ /*
+ * Exit on last entry (NULL), inode not found in list
+ */
+ if (inodes_locked_down[i] == NULL)
+ break;
+
+ if (inodes_locked_down[i]->i_ino == inum) {
+ /*
+ * We found the locked down inode in our array,
+ * so just return this pointer instead of creating
+ * a new one.
+ */
+ return inodes_locked_down[i];
+ }
+ }
+
+ inode = iget_locked(sb, inum);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+ ui = ubifs_inode(inode);
+
+ ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
+ if (!ino) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ ino_key_init(c, &key, inode->i_ino);
+
+ err = ubifs_tnc_lookup(c, &key, ino);
+ if (err)
+ goto out_ino;
+
+ inode->i_flags |= (S_NOCMTIME | S_NOATIME);
+ inode->i_nlink = le32_to_cpu(ino->nlink);
+ inode->i_uid = le32_to_cpu(ino->uid);
+ inode->i_gid = le32_to_cpu(ino->gid);
+ inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec);
+ inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
+ inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
+ inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec);
+ inode->i_ctime.tv_sec = (int64_t)le64_to_cpu(ino->ctime_sec);
+ inode->i_ctime.tv_nsec = le32_to_cpu(ino->ctime_nsec);
+ inode->i_mode = le32_to_cpu(ino->mode);
+ inode->i_size = le64_to_cpu(ino->size);
+
+ ui->data_len = le32_to_cpu(ino->data_len);
+ ui->flags = le32_to_cpu(ino->flags);
+ ui->compr_type = le16_to_cpu(ino->compr_type);
+ ui->creat_sqnum = le64_to_cpu(ino->creat_sqnum);
+ ui->synced_i_size = ui->ui_size = inode->i_size;
+
+ err = validate_inode(c, inode);
+ if (err)
+ goto out_invalid;
+
+ if ((inode->i_mode & S_IFMT) == S_IFLNK) {
+ if (ui->data_len <= 0 || ui->data_len > UBIFS_MAX_INO_DATA) {
+ err = 12;
+ goto out_invalid;
+ }
+ ui->data = kmalloc(ui->data_len + 1, GFP_NOFS);
+ if (!ui->data) {
+ err = -ENOMEM;
+ goto out_ino;
+ }
+ memcpy(ui->data, ino->data, ui->data_len);
+ ((char *)ui->data)[ui->data_len] = '\0';
+ }
+
+ kfree(ino);
+ inode->i_state &= ~(I_LOCK | I_NEW);
+ return inode;
+
+out_invalid:
+ ubifs_err("inode %lu validation failed, error %d", inode->i_ino, err);
+ dbg_dump_node(c, ino);
+ dbg_dump_inode(c, inode);
+ err = -EINVAL;
+out_ino:
+ kfree(ino);
+out:
+ ubifs_err("failed to read inode %lu, error %d", inode->i_ino, err);
+ return ERR_PTR(err);
+}
+
+/**
+ * init_constants_early - initialize UBIFS constants.
+ * @c: UBIFS file-system description object
+ *
+ * This function initializes UBIFS constants which do not need the superblock to
+ * be read. It also checks that the UBI volume satisfies basic UBIFS
+ * requirements. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int init_constants_early(struct ubifs_info *c)
+{
+ if (c->vi.corrupted) {
+ ubifs_warn("UBI volume is corrupted - read-only mode");
+ c->ro_media = 1;
+ }
+
+ if (c->di.ro_mode) {
+ ubifs_msg("read-only UBI device");
+ c->ro_media = 1;
+ }
+
+ if (c->vi.vol_type == UBI_STATIC_VOLUME) {
+ ubifs_msg("static UBI volume - read-only mode");
+ c->ro_media = 1;
+ }
+
+ c->leb_cnt = c->vi.size;
+ c->leb_size = c->vi.usable_leb_size;
+ c->half_leb_size = c->leb_size / 2;
+ c->min_io_size = c->di.min_io_size;
+ c->min_io_shift = fls(c->min_io_size) - 1;
+
+ if (c->leb_size < UBIFS_MIN_LEB_SZ) {
+ ubifs_err("too small LEBs (%d bytes), min. is %d bytes",
+ c->leb_size, UBIFS_MIN_LEB_SZ);
+ return -EINVAL;
+ }
+
+ if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
+ ubifs_err("too few LEBs (%d), min. is %d",
+ c->leb_cnt, UBIFS_MIN_LEB_CNT);
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(c->min_io_size)) {
+ ubifs_err("bad min. I/O size %d", c->min_io_size);
+ return -EINVAL;
+ }
+
+ /*
+	 * UBIFS aligns all nodes to an 8-byte boundary, so to make the
+	 * functions in io.c simpler, assume the minimum I/O unit size to be
+	 * 8 bytes if it is less than 8.
+ */
+ if (c->min_io_size < 8) {
+ c->min_io_size = 8;
+ c->min_io_shift = 3;
+ }
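+	/*
+	 * For instance, NOR flash typically reports a minimum I/O unit of
+	 * 1 byte; treating it as 8 bytes here keeps all I/O sizes consistent
+	 * with the 8-byte node alignment assumed elsewhere.
+	 */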
+
+ c->ref_node_alsz = ALIGN(UBIFS_REF_NODE_SZ, c->min_io_size);
+ c->mst_node_alsz = ALIGN(UBIFS_MST_NODE_SZ, c->min_io_size);
+
+ /*
+ * Initialize node length ranges which are mostly needed for node
+ * length validation.
+ */
+ c->ranges[UBIFS_PAD_NODE].len = UBIFS_PAD_NODE_SZ;
+ c->ranges[UBIFS_SB_NODE].len = UBIFS_SB_NODE_SZ;
+ c->ranges[UBIFS_MST_NODE].len = UBIFS_MST_NODE_SZ;
+ c->ranges[UBIFS_REF_NODE].len = UBIFS_REF_NODE_SZ;
+ c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ;
+ c->ranges[UBIFS_CS_NODE].len = UBIFS_CS_NODE_SZ;
+
+ c->ranges[UBIFS_INO_NODE].min_len = UBIFS_INO_NODE_SZ;
+ c->ranges[UBIFS_INO_NODE].max_len = UBIFS_MAX_INO_NODE_SZ;
+ c->ranges[UBIFS_ORPH_NODE].min_len =
+ UBIFS_ORPH_NODE_SZ + sizeof(__le64);
+ c->ranges[UBIFS_ORPH_NODE].max_len = c->leb_size;
+ c->ranges[UBIFS_DENT_NODE].min_len = UBIFS_DENT_NODE_SZ;
+ c->ranges[UBIFS_DENT_NODE].max_len = UBIFS_MAX_DENT_NODE_SZ;
+ c->ranges[UBIFS_XENT_NODE].min_len = UBIFS_XENT_NODE_SZ;
+ c->ranges[UBIFS_XENT_NODE].max_len = UBIFS_MAX_XENT_NODE_SZ;
+ c->ranges[UBIFS_DATA_NODE].min_len = UBIFS_DATA_NODE_SZ;
+ c->ranges[UBIFS_DATA_NODE].max_len = UBIFS_MAX_DATA_NODE_SZ;
+ /*
+ * Minimum indexing node size is amended later when superblock is
+ * read and the key length is known.
+ */
+ c->ranges[UBIFS_IDX_NODE].min_len = UBIFS_IDX_NODE_SZ + UBIFS_BRANCH_SZ;
+ /*
+ * Maximum indexing node size is amended later when superblock is
+ * read and the fanout is known.
+ */
+ c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX;
+
+ /*
+ * Initialize dead and dark LEB space watermarks. See gc.c for comments
+ * about these values.
+ */
+ c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size);
+ c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size);
+
+ /*
+	 * Calculate how many bytes would be wasted at the end of a LEB if it
+	 * were fully filled with data nodes of maximum size. This is used in
+ * calculations when reporting free space.
+ */
+ c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
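+	/*
+	 * For example (hypothetical sizes): if the LEB size were 130048 bytes
+	 * and the maximum data node size 4144 bytes, the overhead would be
+	 * 130048 % 4144 = 1584 bytes per LEB.
+	 */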
+
+ return 0;
+}
+
+/*
+ * init_constants_sb - initialize UBIFS constants.
+ * @c: UBIFS file-system description object
+ *
+ * This is a helper function which initializes various UBIFS constants after
+ * the superblock has been read. It also checks various UBIFS parameters and
+ * makes sure they are all right. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int init_constants_sb(struct ubifs_info *c)
+{
+ int tmp, err;
+ long long tmp64;
+
+ c->main_bytes = (long long)c->main_lebs * c->leb_size;
+ c->max_znode_sz = sizeof(struct ubifs_znode) +
+ c->fanout * sizeof(struct ubifs_zbranch);
+
+ tmp = ubifs_idx_node_sz(c, 1);
+ c->ranges[UBIFS_IDX_NODE].min_len = tmp;
+ c->min_idx_node_sz = ALIGN(tmp, 8);
+
+ tmp = ubifs_idx_node_sz(c, c->fanout);
+ c->ranges[UBIFS_IDX_NODE].max_len = tmp;
+ c->max_idx_node_sz = ALIGN(tmp, 8);
+
+ /* Make sure LEB size is large enough to fit full commit */
+ tmp = UBIFS_CS_NODE_SZ + UBIFS_REF_NODE_SZ * c->jhead_cnt;
+ tmp = ALIGN(tmp, c->min_io_size);
+ if (tmp > c->leb_size) {
+ dbg_err("too small LEB size %d, at least %d needed",
+ c->leb_size, tmp);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure that the log is large enough to fit reference nodes for
+ * all buds plus one reserved LEB.
+ */
+ tmp64 = c->max_bud_bytes + c->leb_size - 1;
+ c->max_bud_cnt = div_u64(tmp64, c->leb_size);
+ tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1);
+ tmp /= c->leb_size;
+ tmp += 1;
+ if (c->log_lebs < tmp) {
+ dbg_err("too small log %d LEBs, required min. %d LEBs",
+ c->log_lebs, tmp);
+ return -EINVAL;
+ }
+
+ /*
+	 * When budgeting we assume the worst-case scenario: the pages are not
+	 * compressed and direntries are of the maximum size.
+	 *
+	 * Note, data which may be stored in inodes is budgeted separately, so
+	 * it is not included in 'c->inode_budget'.
+ */
+ c->page_budget = UBIFS_MAX_DATA_NODE_SZ * UBIFS_BLOCKS_PER_PAGE;
+ c->inode_budget = UBIFS_INO_NODE_SZ;
+ c->dent_budget = UBIFS_MAX_DENT_NODE_SZ;
+
+ /*
+ * When the amount of flash space used by buds becomes
+ * 'c->max_bud_bytes', UBIFS just blocks all writers and starts commit.
+	 * The writers are unblocked when the commit is finished. To avoid
+	 * blocking writers, UBIFS initiates a background commit in advance,
+	 * when the number of bud bytes exceeds the limit defined below.
+ */
+ c->bg_bud_bytes = (c->max_bud_bytes * 13) >> 4;
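+	/*
+	 * In other words, the background commit threshold is 13/16 (about
+	 * 81%) of 'c->max_bud_bytes'; with a hypothetical max_bud_bytes of
+	 * 8 MiB, the background commit would start at 6.5 MiB of bud data.
+	 */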
+
+ /*
+ * Ensure minimum journal size. All the bytes in the journal heads are
+ * considered to be used, when calculating the current journal usage.
+ * Consequently, if the journal is too small, UBIFS will treat it as
+ * always full.
+ */
+ tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1;
+ if (c->bg_bud_bytes < tmp64)
+ c->bg_bud_bytes = tmp64;
+ if (c->max_bud_bytes < tmp64 + c->leb_size)
+ c->max_bud_bytes = tmp64 + c->leb_size;
+
+ err = ubifs_calc_lpt_geom(c);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * init_constants_master - initialize UBIFS constants.
+ * @c: UBIFS file-system description object
+ *
+ * This is a helper function which initializes various UBIFS constants after
+ * the master node has been read. It also checks various UBIFS parameters and
+ * makes sure they are all right.
+ */
+static void init_constants_master(struct ubifs_info *c)
+{
+ long long tmp64;
+
+ c->min_idx_lebs = ubifs_calc_min_idx_lebs(c);
+
+ /*
+ * Calculate total amount of FS blocks. This number is not used
+ * internally because it does not make much sense for UBIFS, but it is
+ * necessary to report something for the 'statfs()' call.
+ *
+ * Subtract the LEB reserved for GC, the LEB which is reserved for
+ * deletions, minimum LEBs for the index, and assume only one journal
+ * head is available.
+ */
+ tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1;
+ tmp64 *= (long long)c->leb_size - c->leb_overhead;
+ tmp64 = ubifs_reported_space(c, tmp64);
+ c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT;
+}
+
+/**
+ * free_orphans - free orphans.
+ * @c: UBIFS file-system description object
+ */
+static void free_orphans(struct ubifs_info *c)
+{
+ struct ubifs_orphan *orph;
+
+ while (c->orph_dnext) {
+ orph = c->orph_dnext;
+ c->orph_dnext = orph->dnext;
+ list_del(&orph->list);
+ kfree(orph);
+ }
+
+ while (!list_empty(&c->orph_list)) {
+ orph = list_entry(c->orph_list.next, struct ubifs_orphan, list);
+ list_del(&orph->list);
+ kfree(orph);
+ dbg_err("orphan list not empty at unmount");
+ }
+
+ vfree(c->orph_buf);
+ c->orph_buf = NULL;
+}
+
+/**
+ * check_volume_empty - check if the UBI volume is empty.
+ * @c: UBIFS file-system description object
+ *
+ * This function checks if the UBIFS volume is empty by looking if its LEBs are
+ * mapped or not. The result of checking is stored in the @c->empty variable.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int check_volume_empty(struct ubifs_info *c)
+{
+ int lnum, err;
+
+ c->empty = 1;
+ for (lnum = 0; lnum < c->leb_cnt; lnum++) {
+ err = ubi_is_mapped(c->ubi, lnum);
+ if (unlikely(err < 0))
+ return err;
+ if (err == 1) {
+ c->empty = 0;
+ break;
+ }
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
+/**
+ * mount_ubifs - mount UBIFS file-system.
+ * @c: UBIFS file-system description object
+ *
+ * This function mounts UBIFS file system. Returns zero in case of success and
+ * a negative error code in case of failure.
+ *
+ * Note, the function does not de-allocate resources if it fails half way
+ * through, and the caller has to do this instead.
+ */
+static int mount_ubifs(struct ubifs_info *c)
+{
+ struct super_block *sb = c->vfs_sb;
+ int err, mounted_read_only = (sb->s_flags & MS_RDONLY);
+ long long x;
+ size_t sz;
+
+ err = init_constants_early(c);
+ if (err)
+ return err;
+
+ err = ubifs_debugging_init(c);
+ if (err)
+ return err;
+
+ err = check_volume_empty(c);
+ if (err)
+ goto out_free;
+
+ if (c->empty && (mounted_read_only || c->ro_media)) {
+ /*
+ * This UBI volume is empty, and read-only, or the file system
+ * is mounted read-only - we cannot format it.
+ */
+ ubifs_err("can't format empty UBI volume: read-only %s",
+ c->ro_media ? "UBI volume" : "mount");
+ err = -EROFS;
+ goto out_free;
+ }
+
+ if (c->ro_media && !mounted_read_only) {
+ ubifs_err("cannot mount read-write - read-only media");
+ err = -EROFS;
+ goto out_free;
+ }
+
+ /*
+ * The requirement for the buffer is that it should fit indexing B-tree
+ * height amount of integers. We assume the height of the TNC tree will
+ * never exceed 64.
+ */
+ err = -ENOMEM;
+ c->bottom_up_buf = kmalloc(BOTTOM_UP_HEIGHT * sizeof(int), GFP_KERNEL);
+ if (!c->bottom_up_buf)
+ goto out_free;
+
+ c->sbuf = vmalloc(c->leb_size);
+ if (!c->sbuf)
+ goto out_free;
+
+ /*
+ * We have to check all CRCs, even for data nodes, when we mount the FS
+ * (specifically, when we are replaying).
+ */
+ c->always_chk_crc = 1;
+
+ err = ubifs_read_superblock(c);
+ if (err)
+ goto out_free;
+
+ /*
+ * Make sure the compressor which is set as default in the superblock
+ * or overridden by mount options is actually compiled in.
+ */
+ if (!ubifs_compr_present(c->default_compr)) {
+ ubifs_err("compressor \"%s\" is not compiled in",
+ ubifs_compr_name(c->default_compr));
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ dbg_failure_mode_registration(c);
+
+ err = init_constants_sb(c);
+ if (err)
+ goto out_free;
+
+ sz = ALIGN(c->max_idx_node_sz, c->min_io_size);
+ sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size);
+ c->cbuf = kmalloc(sz, GFP_NOFS);
+ if (!c->cbuf) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
+
+ err = ubifs_read_master(c);
+ if (err)
+ goto out_master;
+
+ init_constants_master(c);
+
+ if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
+ ubifs_msg("recovery needed");
+ c->need_recovery = 1;
+ }
+
+ err = ubifs_lpt_init(c, 1, !mounted_read_only);
+ if (err)
+ goto out_lpt;
+
+ err = dbg_check_idx_size(c, c->old_idx_sz);
+ if (err)
+ goto out_lpt;
+
+ err = ubifs_replay_journal(c);
+ if (err)
+ goto out_journal;
+
+ err = ubifs_mount_orphans(c, c->need_recovery, mounted_read_only);
+ if (err)
+ goto out_orphans;
+
+ if (c->need_recovery) {
+ err = ubifs_recover_size(c);
+ if (err)
+ goto out_orphans;
+ }
+
+ spin_lock(&ubifs_infos_lock);
+ list_add_tail(&c->infos_list, &ubifs_infos);
+ spin_unlock(&ubifs_infos_lock);
+
+ if (c->need_recovery) {
+ if (mounted_read_only)
+ ubifs_msg("recovery deferred");
+ else {
+ c->need_recovery = 0;
+ ubifs_msg("recovery completed");
+ }
+ }
+
+ err = dbg_check_filesystem(c);
+ if (err)
+ goto out_infos;
+
+ c->always_chk_crc = 0;
+
+ ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"",
+ c->vi.ubi_num, c->vi.vol_id, c->vi.name);
+ if (mounted_read_only)
+ ubifs_msg("mounted read-only");
+ x = (long long)c->main_lebs * c->leb_size;
+ ubifs_msg("file system size: %lld bytes (%lld KiB, %lld MiB, %d "
+ "LEBs)", x, x >> 10, x >> 20, c->main_lebs);
+ x = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes;
+ ubifs_msg("journal size: %lld bytes (%lld KiB, %lld MiB, %d "
+ "LEBs)", x, x >> 10, x >> 20, c->log_lebs + c->max_bud_cnt);
+ ubifs_msg("media format: w%d/r%d (latest is w%d/r%d)",
+ c->fmt_version, c->ro_compat_version,
+ UBIFS_FORMAT_VERSION, UBIFS_RO_COMPAT_VERSION);
+ ubifs_msg("default compressor: %s", ubifs_compr_name(c->default_compr));
+ ubifs_msg("reserved for root: %llu bytes (%llu KiB)",
+ c->report_rp_size, c->report_rp_size >> 10);
+
+ dbg_msg("min. I/O unit size: %d bytes", c->min_io_size);
+ dbg_msg("LEB size: %d bytes (%d KiB)",
+ c->leb_size, c->leb_size >> 10);
+ dbg_msg("data journal heads: %d",
+ c->jhead_cnt - NONDATA_JHEADS_CNT);
+ dbg_msg("UUID: %02X%02X%02X%02X-%02X%02X"
+ "-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
+ c->uuid[0], c->uuid[1], c->uuid[2], c->uuid[3],
+ c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
+ c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
+ c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
+ dbg_msg("big_lpt %d", c->big_lpt);
+ dbg_msg("log LEBs: %d (%d - %d)",
+ c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
+ dbg_msg("LPT area LEBs: %d (%d - %d)",
+ c->lpt_lebs, c->lpt_first, c->lpt_last);
+ dbg_msg("orphan area LEBs: %d (%d - %d)",
+ c->orph_lebs, c->orph_first, c->orph_last);
+ dbg_msg("main area LEBs: %d (%d - %d)",
+ c->main_lebs, c->main_first, c->leb_cnt - 1);
+ dbg_msg("index LEBs: %d", c->lst.idx_lebs);
+ dbg_msg("total index bytes: %lld (%lld KiB, %lld MiB)",
+ c->old_idx_sz, c->old_idx_sz >> 10, c->old_idx_sz >> 20);
+ dbg_msg("key hash type: %d", c->key_hash_type);
+ dbg_msg("tree fanout: %d", c->fanout);
+ dbg_msg("reserved GC LEB: %d", c->gc_lnum);
+ dbg_msg("first main LEB: %d", c->main_first);
+ dbg_msg("max. znode size %d", c->max_znode_sz);
+ dbg_msg("max. index node size %d", c->max_idx_node_sz);
+ dbg_msg("node sizes: data %zu, inode %zu, dentry %zu",
+ UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ);
+ dbg_msg("node sizes: trun %zu, sb %zu, master %zu",
+ UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ);
+ dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu",
+ UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ);
+ dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu",
+ UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ,
+ UBIFS_MAX_DENT_NODE_SZ);
+ dbg_msg("dead watermark: %d", c->dead_wm);
+ dbg_msg("dark watermark: %d", c->dark_wm);
+ dbg_msg("LEB overhead: %d", c->leb_overhead);
+ x = (long long)c->main_lebs * c->dark_wm;
+ dbg_msg("max. dark space: %lld (%lld KiB, %lld MiB)",
+ x, x >> 10, x >> 20);
+ dbg_msg("maximum bud bytes: %lld (%lld KiB, %lld MiB)",
+ c->max_bud_bytes, c->max_bud_bytes >> 10,
+ c->max_bud_bytes >> 20);
+ dbg_msg("BG commit bud bytes: %lld (%lld KiB, %lld MiB)",
+ c->bg_bud_bytes, c->bg_bud_bytes >> 10,
+ c->bg_bud_bytes >> 20);
+ dbg_msg("current bud bytes %lld (%lld KiB, %lld MiB)",
+ c->bud_bytes, c->bud_bytes >> 10, c->bud_bytes >> 20);
+ dbg_msg("max. seq. number: %llu", c->max_sqnum);
+ dbg_msg("commit number: %llu", c->cmt_no);
+
+ return 0;
+
+out_infos:
+ spin_lock(&ubifs_infos_lock);
+ list_del(&c->infos_list);
+ spin_unlock(&ubifs_infos_lock);
+out_orphans:
+ free_orphans(c);
+out_journal:
+out_lpt:
+ ubifs_lpt_free(c, 0);
+out_master:
+ kfree(c->mst_node);
+ kfree(c->rcvrd_mst_node);
+ if (c->bgt)
+ kthread_stop(c->bgt);
+ kfree(c->cbuf);
+out_free:
+ vfree(c->ileb_buf);
+ vfree(c->sbuf);
+ kfree(c->bottom_up_buf);
+ ubifs_debugging_exit(c);
+ return err;
+}
+
+/**
+ * ubifs_umount - un-mount UBIFS file-system.
+ * @c: UBIFS file-system description object
+ *
+ * Note, this function is called to free allocated resources when un-mounting,
+ * as well as free resources when an error occurred while we were half way
+ * through mounting (error path cleanup function). So it has to make sure the
+ * resource was actually allocated before freeing it.
+ */
+void ubifs_umount(struct ubifs_info *c)
+{
+ dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num,
+ c->vi.vol_id);
+
+ spin_lock(&ubifs_infos_lock);
+ list_del(&c->infos_list);
+ spin_unlock(&ubifs_infos_lock);
+
+ if (c->bgt)
+ kthread_stop(c->bgt);
+
+ free_orphans(c);
+ ubifs_lpt_free(c, 0);
+
+ kfree(c->cbuf);
+ kfree(c->rcvrd_mst_node);
+ kfree(c->mst_node);
+ vfree(c->ileb_buf);
+ vfree(c->sbuf);
+ kfree(c->bottom_up_buf);
+ ubifs_debugging_exit(c);
+
+ /* Finally free U-Boot's global copy of superblock */
+ if (ubifs_sb != NULL) {
+ free(ubifs_sb->s_fs_info);
+ free(ubifs_sb);
+ }
+}
+
+/**
+ * open_ubi - parse UBI device name string and open the UBI device.
+ * @name: UBI volume name
+ * @mode: UBI volume open mode
+ *
+ * There are several ways to specify UBI volumes when mounting UBIFS:
+ * o ubiX_Y - UBI device number X, volume Y;
+ * o ubiY - UBI device number 0, volume Y;
+ * o ubiX:NAME - mount UBI device X, volume with name NAME;
+ * o ubi:NAME - mount UBI device 0, volume with name NAME.
+ *
+ * Alternative '!' separator may be used instead of ':' (because some shells
+ * like busybox may interpret ':' as an NFS host name separator). This function
+ * returns ubi volume object in case of success and a negative error code in
+ * case of failure.
+ */
+static struct ubi_volume_desc *open_ubi(const char *name, int mode)
+{
+ int dev, vol;
+ char *endptr;
+
+ if (name[0] != 'u' || name[1] != 'b' || name[2] != 'i')
+ return ERR_PTR(-EINVAL);
+
+ /* ubi:NAME method */
+ if ((name[3] == ':' || name[3] == '!') && name[4] != '\0')
+ return ubi_open_volume_nm(0, name + 4, mode);
+
+ if (!isdigit(name[3]))
+ return ERR_PTR(-EINVAL);
+
+ dev = simple_strtoul(name + 3, &endptr, 0);
+
+ /* ubiY method */
+ if (*endptr == '\0')
+ return ubi_open_volume(0, dev, mode);
+
+ /* ubiX_Y method */
+ if (*endptr == '_' && isdigit(endptr[1])) {
+ vol = simple_strtoul(endptr + 1, &endptr, 0);
+ if (*endptr != '\0')
+ return ERR_PTR(-EINVAL);
+ return ubi_open_volume(dev, vol, mode);
+ }
+
+ /* ubiX:NAME method */
+ if ((*endptr == ':' || *endptr == '!') && endptr[1] != '\0')
+ return ubi_open_volume_nm(dev, ++endptr, mode);
+
+ return ERR_PTR(-EINVAL);
+}
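+
+/*
+ * Illustrative examples of the accepted forms (hypothetical device and
+ * volume numbers, and a hypothetical volume named "rootfs"):
+ *
+ *	open_ubi("ubi0_1", UBI_READONLY);	device 0, volume 1
+ *	open_ubi("ubi1", UBI_READONLY);		device 0, volume 1
+ *	open_ubi("ubi0:rootfs", UBI_READONLY);	device 0, volume "rootfs"
+ *	open_ubi("ubi!rootfs", UBI_READONLY);	device 0, volume "rootfs"
+ */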
+
+static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct ubi_volume_desc *ubi = sb->s_fs_info;
+ struct ubifs_info *c;
+ struct inode *root;
+ int err;
+
+ c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ spin_lock_init(&c->cnt_lock);
+ spin_lock_init(&c->cs_lock);
+ spin_lock_init(&c->buds_lock);
+ spin_lock_init(&c->space_lock);
+ spin_lock_init(&c->orphan_lock);
+ init_rwsem(&c->commit_sem);
+ mutex_init(&c->lp_mutex);
+ mutex_init(&c->tnc_mutex);
+ mutex_init(&c->log_mutex);
+ mutex_init(&c->mst_mutex);
+ mutex_init(&c->umount_mutex);
+ init_waitqueue_head(&c->cmt_wq);
+ c->buds = RB_ROOT;
+ c->old_idx = RB_ROOT;
+ c->size_tree = RB_ROOT;
+ c->orph_tree = RB_ROOT;
+ INIT_LIST_HEAD(&c->infos_list);
+ INIT_LIST_HEAD(&c->idx_gc);
+ INIT_LIST_HEAD(&c->replay_list);
+ INIT_LIST_HEAD(&c->replay_buds);
+ INIT_LIST_HEAD(&c->uncat_list);
+ INIT_LIST_HEAD(&c->empty_list);
+ INIT_LIST_HEAD(&c->freeable_list);
+ INIT_LIST_HEAD(&c->frdi_idx_list);
+ INIT_LIST_HEAD(&c->unclean_leb_list);
+ INIT_LIST_HEAD(&c->old_buds);
+ INIT_LIST_HEAD(&c->orph_list);
+ INIT_LIST_HEAD(&c->orph_new);
+
+ c->highest_inum = UBIFS_FIRST_INO;
+ c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
+
+ ubi_get_volume_info(ubi, &c->vi);
+ ubi_get_device_info(c->vi.ubi_num, &c->di);
+
+ /* Re-open the UBI volume, read-only in this U-Boot implementation */
+ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READONLY);
+ if (IS_ERR(c->ubi)) {
+ err = PTR_ERR(c->ubi);
+ goto out_free;
+ }
+
+ c->vfs_sb = sb;
+
+ sb->s_fs_info = c;
+ sb->s_magic = UBIFS_SUPER_MAGIC;
+ sb->s_blocksize = UBIFS_BLOCK_SIZE;
+ sb->s_blocksize_bits = UBIFS_BLOCK_SHIFT;
+ sb->s_dev = c->vi.cdev;
+ sb->s_maxbytes = c->max_inode_sz = key_max_inode_size(c);
+ if (c->max_inode_sz > MAX_LFS_FILESIZE)
+ sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE;
+
+ if (c->rw_incompat) {
+ ubifs_err("the file-system is not R/W-compatible");
+ ubifs_msg("on-flash format version is w%d/r%d, but software "
+ "only supports up to version w%d/r%d", c->fmt_version,
+ c->ro_compat_version, UBIFS_FORMAT_VERSION,
+ UBIFS_RO_COMPAT_VERSION);
+ return -EROFS;
+ }
+
+ mutex_lock(&c->umount_mutex);
+ err = mount_ubifs(c);
+ if (err) {
+ ubifs_assert(err < 0);
+ goto out_unlock;
+ }
+
+ /* Read the root inode */
+ root = ubifs_iget(sb, UBIFS_ROOT_INO);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto out_umount;
+ }
+
+ sb->s_root = NULL;
+
+ mutex_unlock(&c->umount_mutex);
+ return 0;
+
+out_umount:
+ ubifs_umount(c);
+out_unlock:
+ mutex_unlock(&c->umount_mutex);
+ ubi_close_volume(c->ubi);
+out_free:
+ kfree(c);
+ return err;
+}
+
+static int sb_test(struct super_block *sb, void *data)
+{
+ dev_t *dev = data;
+
+ return sb->s_dev == *dev;
+}
+
+static int ubifs_get_sb(struct file_system_type *fs_type, int flags,
+ const char *name, void *data, struct vfsmount *mnt)
+{
+ struct ubi_volume_desc *ubi;
+ struct ubi_volume_info vi;
+ struct super_block *sb;
+ int err;
+
+ dbg_gen("name %s, flags %#x", name, flags);
+
+ /*
+ * Get UBI device number and volume ID. Mount it read-only so far
+ * because this might be a new mount point, and UBI allows only one
+ * read-write user at a time.
+ */
+ ubi = open_ubi(name, UBI_READONLY);
+ if (IS_ERR(ubi)) {
+ ubifs_err("cannot open \"%s\", error %d",
+ name, (int)PTR_ERR(ubi));
+ return PTR_ERR(ubi);
+ }
+ ubi_get_volume_info(ubi, &vi);
+
+ dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id);
+
+ sb = sget(fs_type, &sb_test, &sb_set, &vi.cdev);
+ if (IS_ERR(sb)) {
+ err = PTR_ERR(sb);
+ goto out_close;
+ }
+
+ if (sb->s_root) {
+ /* A new mount point for already mounted UBIFS */
+ dbg_gen("this ubi volume is already mounted");
+ if ((flags ^ sb->s_flags) & MS_RDONLY) {
+ err = -EBUSY;
+ goto out_deact;
+ }
+ } else {
+ sb->s_flags = flags;
+ /*
+ * Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is
+ * replaced by 'c'.
+ */
+ sb->s_fs_info = ubi;
+ err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ if (err)
+ goto out_deact;
+ /* We do not support atime */
+ sb->s_flags |= MS_ACTIVE | MS_NOATIME;
+ }
+
+ /* 'fill_super()' opens ubi again so we must close it here */
+ ubi_close_volume(ubi);
+
+ ubifs_sb = sb;
+ return 0;
+
+out_deact:
+ up_write(&sb->s_umount);
+out_close:
+ ubi_close_volume(ubi);
+ return err;
+}
+
+int __init ubifs_init(void)
+{
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct ubifs_ch) != 24);
+
+ /* Make sure node sizes are 8-byte aligned */
+ BUILD_BUG_ON(UBIFS_CH_SZ & 7);
+ BUILD_BUG_ON(UBIFS_INO_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_DENT_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_XENT_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_DATA_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_SB_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_MST_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_REF_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_CS_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_ORPH_NODE_SZ & 7);
+
+ BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ & 7);
+ BUILD_BUG_ON(UBIFS_MAX_NODE_SZ & 7);
+ BUILD_BUG_ON(MIN_WRITE_SZ & 7);
+
+ /* Check min. node size */
+ BUILD_BUG_ON(UBIFS_INO_NODE_SZ < MIN_WRITE_SZ);
+ BUILD_BUG_ON(UBIFS_DENT_NODE_SZ < MIN_WRITE_SZ);
+ BUILD_BUG_ON(UBIFS_XENT_NODE_SZ < MIN_WRITE_SZ);
+ BUILD_BUG_ON(UBIFS_TRUN_NODE_SZ < MIN_WRITE_SZ);
+
+ BUILD_BUG_ON(UBIFS_MAX_DENT_NODE_SZ > UBIFS_MAX_NODE_SZ);
+ BUILD_BUG_ON(UBIFS_MAX_XENT_NODE_SZ > UBIFS_MAX_NODE_SZ);
+ BUILD_BUG_ON(UBIFS_MAX_DATA_NODE_SZ > UBIFS_MAX_NODE_SZ);
+ BUILD_BUG_ON(UBIFS_MAX_INO_NODE_SZ > UBIFS_MAX_NODE_SZ);
+
+ /* Defined node sizes */
+ BUILD_BUG_ON(UBIFS_SB_NODE_SZ != 4096);
+ BUILD_BUG_ON(UBIFS_MST_NODE_SZ != 512);
+ BUILD_BUG_ON(UBIFS_INO_NODE_SZ != 160);
+ BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64);
+
+ /*
+ * We use 2 bit wide bit-fields to store compression type, which should
+ * be amended if more compressors are added. The bit-fields are:
+ * @compr_type in 'struct ubifs_inode', @default_compr in
+ * 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'.
+ */
+ BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4);
+
+ /*
+ * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to
+ * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2.
+ */
+ if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) {
+ ubifs_err("VFS page cache size is %u bytes, but UBIFS requires"
+ " at least 4096 bytes",
+ (unsigned int)PAGE_CACHE_SIZE);
+ return -EINVAL;
+ }
+
+ err = -ENOMEM;
+
+ err = ubifs_compressors_init();
+ if (err)
+ goto out_shrinker;
+
+ return 0;
+
+out_shrinker:
+ return err;
+}
+
+/*
+ * U-Boot glue: entry points backing the 'ubifsmount' command.
+ */
+
+static struct file_system_type ubifs_fs_type = {
+ .name = "ubifs",
+ .owner = THIS_MODULE,
+ .get_sb = ubifs_get_sb,
+};
+
+int ubifs_mount(char *name)
+{
+ int flags;
+ void *data;
+ struct vfsmount *mnt;
+ int ret;
+ struct ubifs_info *c;
+
+ /*
+ * First unmount if already mounted
+ */
+ if (ubifs_sb)
+ ubifs_umount(ubifs_sb->s_fs_info);
+
+ INIT_LIST_HEAD(&ubifs_infos);
+ INIT_LIST_HEAD(&ubifs_fs_type.fs_supers);
+
+ /*
+ * Mount in read-only mode
+ */
+ flags = MS_RDONLY;
+ data = NULL;
+ mnt = NULL;
+ ret = ubifs_get_sb(&ubifs_fs_type, flags, name, data, mnt);
+ if (ret) {
+ ubifs_err("Error reading superblock on volume '%s' errno=%d!\n", name, ret);
+ return -1;
+ }
+
+ c = ubifs_sb->s_fs_info;
+ ubi_close_volume(c->ubi);
+
+ return 0;
+}
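+
+/*
+ * Illustrative usage sketch (hypothetical volume name):
+ *
+ *	ret = ubifs_mount("ubi0:rootfs");
+ *
+ * A non-zero return means mounting failed and nothing is left mounted.
+ */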
diff --git a/qemu/roms/u-boot/fs/ubifs/tnc.c b/qemu/roms/u-boot/fs/ubifs/tnc.c
new file mode 100644
index 000000000..ccda9387b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/tnc.c
@@ -0,0 +1,2767 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file implements TNC (Tree Node Cache) which caches indexing nodes of
+ * the UBIFS B-tree.
+ *
+ * At the moment the locking rules of the TNC tree are quite simple and
+ * straightforward. We just have a mutex and lock it when we traverse the
+ * tree. If a znode is not in memory, we read it from flash while still having
+ * the mutex locked.
+ */
+
+#include "ubifs.h"
+
+/*
+ * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
+ * @NAME_LESS: name corresponding to the first argument is less than second
+ * @NAME_MATCHES: names match
+ * @NAME_GREATER: name corresponding to the first argument is greater than the
+ * second
+ * @NOT_ON_MEDIA: node referred by zbranch does not exist on the media
+ *
+ * These constants were introduced to improve readability.
+ */
+enum {
+ NAME_LESS = 0,
+ NAME_MATCHES = 1,
+ NAME_GREATER = 2,
+ NOT_ON_MEDIA = 3,
+};
+
+/**
+ * insert_old_idx - record an index node obsoleted since the last commit start.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number of obsoleted index node
+ * @offs: offset of obsoleted index node
+ *
+ * Returns %0 on success, and a negative error code on failure.
+ *
+ * For recovery, there must always be a complete intact version of the index on
+ * flash at all times. That is called the "old index". It is the index as at the
+ * time of the last successful commit. Many of the index nodes in the old index
+ * may be dirty, but they must not be erased until the next successful commit
+ * (at which point that index becomes the old index).
+ *
+ * That means that the garbage collection and the in-the-gaps method of
+ * committing must be able to determine if an index node is in the old index.
+ * Most of the old index nodes can be found by looking up the TNC using the
+ * 'lookup_znode()' function. However, some of the old index nodes may have
+ * been deleted from the current index or may have been changed so much that
+ * they cannot be easily found. In those cases, an entry is added to an RB-tree.
+ * That is what this function does. The RB-tree is ordered by LEB number and
+ * offset because they uniquely identify the old index node.
+ */
+static int insert_old_idx(struct ubifs_info *c, int lnum, int offs)
+{
+ struct ubifs_old_idx *old_idx, *o;
+ struct rb_node **p, *parent = NULL;
+
+ old_idx = kmalloc(sizeof(struct ubifs_old_idx), GFP_NOFS);
+ if (unlikely(!old_idx))
+ return -ENOMEM;
+ old_idx->lnum = lnum;
+ old_idx->offs = offs;
+
+ p = &c->old_idx.rb_node;
+ while (*p) {
+ parent = *p;
+ o = rb_entry(parent, struct ubifs_old_idx, rb);
+ if (lnum < o->lnum)
+ p = &(*p)->rb_left;
+ else if (lnum > o->lnum)
+ p = &(*p)->rb_right;
+ else if (offs < o->offs)
+ p = &(*p)->rb_left;
+ else if (offs > o->offs)
+ p = &(*p)->rb_right;
+ else {
+ ubifs_err("old idx added twice!");
+ kfree(old_idx);
+ return 0;
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+ rb_insert_color(&old_idx->rb, &c->old_idx);
+ return 0;
+}
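+
+/*
+ * Illustrative sketch (hypothetical LEB numbers and offsets): entries are
+ * ordered by LEB number first and by offset second, so after
+ *
+ *	insert_old_idx(c, 7, 0);
+ *	insert_old_idx(c, 7, 2048);
+ *	insert_old_idx(c, 9, 0);
+ *
+ * (7, 0) is the leftmost node of the RB-tree and (9, 0) the rightmost.
+ */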
+
+/**
+ * insert_old_idx_znode - record a znode obsoleted since last commit start.
+ * @c: UBIFS file-system description object
+ * @znode: znode of obsoleted index node
+ *
+ * Returns %0 on success, and a negative error code on failure.
+ */
+int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode)
+{
+ if (znode->parent) {
+ struct ubifs_zbranch *zbr;
+
+ zbr = &znode->parent->zbranch[znode->iip];
+ if (zbr->len)
+ return insert_old_idx(c, zbr->lnum, zbr->offs);
+ } else
+ if (c->zroot.len)
+ return insert_old_idx(c, c->zroot.lnum,
+ c->zroot.offs);
+ return 0;
+}
+
+/**
+ * ins_clr_old_idx_znode - record a znode obsoleted since last commit start.
+ * @c: UBIFS file-system description object
+ * @znode: znode of obsoleted index node
+ *
+ * Returns %0 on success, and a negative error code on failure.
+ */
+static int ins_clr_old_idx_znode(struct ubifs_info *c,
+ struct ubifs_znode *znode)
+{
+ int err;
+
+ if (znode->parent) {
+ struct ubifs_zbranch *zbr;
+
+ zbr = &znode->parent->zbranch[znode->iip];
+ if (zbr->len) {
+ err = insert_old_idx(c, zbr->lnum, zbr->offs);
+ if (err)
+ return err;
+ zbr->lnum = 0;
+ zbr->offs = 0;
+ zbr->len = 0;
+ }
+ } else
+ if (c->zroot.len) {
+ err = insert_old_idx(c, c->zroot.lnum, c->zroot.offs);
+ if (err)
+ return err;
+ c->zroot.lnum = 0;
+ c->zroot.offs = 0;
+ c->zroot.len = 0;
+ }
+ return 0;
+}
+
+/**
+ * destroy_old_idx - destroy the old_idx RB-tree.
+ * @c: UBIFS file-system description object
+ *
+ * During start commit, the old_idx RB-tree is used to avoid overwriting index
+ * nodes that were in the index last commit but have since been deleted. This
+ * is necessary for recovery i.e. the old index must be kept intact until the
+ * new index is successfully written. The old-idx RB-tree is used for the
+ * in-the-gaps method of writing index nodes and is destroyed every commit.
+ */
+void destroy_old_idx(struct ubifs_info *c)
+{
+ struct rb_node *this = c->old_idx.rb_node;
+ struct ubifs_old_idx *old_idx;
+
+ while (this) {
+ if (this->rb_left) {
+ this = this->rb_left;
+ continue;
+ } else if (this->rb_right) {
+ this = this->rb_right;
+ continue;
+ }
+ old_idx = rb_entry(this, struct ubifs_old_idx, rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &old_idx->rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+ kfree(old_idx);
+ }
+ c->old_idx = RB_ROOT;
+}
+
+/**
+ * copy_znode - copy a dirty znode.
+ * @c: UBIFS file-system description object
+ * @znode: znode to copy
+ *
+ * A dirty znode being committed may not be changed, so it is copied.
+ */
+static struct ubifs_znode *copy_znode(struct ubifs_info *c,
+ struct ubifs_znode *znode)
+{
+ struct ubifs_znode *zn;
+
+ zn = kmalloc(c->max_znode_sz, GFP_NOFS);
+ if (unlikely(!zn))
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(zn, znode, c->max_znode_sz);
+ zn->cnext = NULL;
+ __set_bit(DIRTY_ZNODE, &zn->flags);
+ __clear_bit(COW_ZNODE, &zn->flags);
+
+ ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
+ __set_bit(OBSOLETE_ZNODE, &znode->flags);
+
+ if (znode->level != 0) {
+ int i;
+ const int n = zn->child_cnt;
+
+ /* The children now have new parent */
+ for (i = 0; i < n; i++) {
+ struct ubifs_zbranch *zbr = &zn->zbranch[i];
+
+ if (zbr->znode)
+ zbr->znode->parent = zn;
+ }
+ }
+
+ atomic_long_inc(&c->dirty_zn_cnt);
+ return zn;
+}
+
+/**
+ * add_idx_dirt - add dirt due to a dirty znode.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number of index node
+ * @dirt: size of index node
+ *
+ * This function updates lprops dirty space and the new size of the index.
+ */
+static int add_idx_dirt(struct ubifs_info *c, int lnum, int dirt)
+{
+ c->calc_idx_sz -= ALIGN(dirt, 8);
+ return ubifs_add_dirt(c, lnum, dirt);
+}
+
+/**
+ * dirty_cow_znode - ensure a znode is not being committed.
+ * @c: UBIFS file-system description object
+ * @zbr: branch of znode to check
+ *
+ * Returns dirtied znode on success or negative error code on failure.
+ */
+static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
+ struct ubifs_zbranch *zbr)
+{
+ struct ubifs_znode *znode = zbr->znode;
+ struct ubifs_znode *zn;
+ int err;
+
+ if (!test_bit(COW_ZNODE, &znode->flags)) {
+ /* znode is not being committed */
+ if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
+ atomic_long_inc(&c->dirty_zn_cnt);
+ atomic_long_dec(&c->clean_zn_cnt);
+ atomic_long_dec(&ubifs_clean_zn_cnt);
+ err = add_idx_dirt(c, zbr->lnum, zbr->len);
+ if (unlikely(err))
+ return ERR_PTR(err);
+ }
+ return znode;
+ }
+
+ zn = copy_znode(c, znode);
+ if (IS_ERR(zn))
+ return zn;
+
+ if (zbr->len) {
+ err = insert_old_idx(c, zbr->lnum, zbr->offs);
+ if (unlikely(err))
+ return ERR_PTR(err);
+ err = add_idx_dirt(c, zbr->lnum, zbr->len);
+ } else
+ err = 0;
+
+ zbr->znode = zn;
+ zbr->lnum = 0;
+ zbr->offs = 0;
+ zbr->len = 0;
+
+ if (unlikely(err))
+ return ERR_PTR(err);
+ return zn;
+}
+
+/**
+ * lnc_add - add a leaf node to the leaf node cache.
+ * @c: UBIFS file-system description object
+ * @zbr: zbranch of leaf node
+ * @node: leaf node
+ *
+ * Leaf nodes are non-index nodes, such as directory entry nodes or data nodes. The
+ * purpose of the leaf node cache is to save re-reading the same leaf node over
+ * and over again. Most things are cached by VFS, however the file system must
+ * cache directory entries for readdir and for resolving hash collisions. The
+ * present implementation of the leaf node cache is extremely simple, and
+ * allows for error returns that are not used but that may be needed if a more
+ * complex implementation is created.
+ *
+ * Note, this function does not add the @node object to LNC directly, but
+ * allocates a copy of the object and adds the copy to LNC. The reason for this
+ * is that @node has been allocated outside of the TNC subsystem and will be
+ * used with @c->tnc_mutex unlocked upon return from the TNC subsystem. But LNC
+ * may be changed at any time, e.g. freed by the shrinker.
+ */
+static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ const void *node)
+{
+ int err;
+ void *lnc_node;
+ const struct ubifs_dent_node *dent = node;
+
+ ubifs_assert(!zbr->leaf);
+ ubifs_assert(zbr->len != 0);
+ ubifs_assert(is_hash_key(c, &zbr->key));
+
+ err = ubifs_validate_entry(c, dent);
+ if (err) {
+ dbg_dump_stack();
+ dbg_dump_node(c, dent);
+ return err;
+ }
+
+ lnc_node = kmalloc(zbr->len, GFP_NOFS);
+ if (!lnc_node)
+ /* We don't have to have the cache, so no error */
+ return 0;
+
+ memcpy(lnc_node, node, zbr->len);
+ zbr->leaf = lnc_node;
+ return 0;
+}
+
+/**
+ * lnc_add_directly - add a leaf node to the leaf-node-cache.
+ * @c: UBIFS file-system description object
+ * @zbr: zbranch of leaf node
+ * @node: leaf node
+ *
+ * This function is similar to 'lnc_add()', but it does not create a copy of
+ * @node; it inserts @node into the leaf node cache directly.
+ */
+static int lnc_add_directly(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ void *node)
+{
+ int err;
+
+ ubifs_assert(!zbr->leaf);
+ ubifs_assert(zbr->len != 0);
+
+ err = ubifs_validate_entry(c, node);
+ if (err) {
+ dbg_dump_stack();
+ dbg_dump_node(c, node);
+ return err;
+ }
+
+ zbr->leaf = node;
+ return 0;
+}
+
+/**
+ * lnc_free - remove a leaf node from the leaf node cache.
+ * @zbr: zbranch of leaf node
+ */
+static void lnc_free(struct ubifs_zbranch *zbr)
+{
+ if (!zbr->leaf)
+ return;
+ kfree(zbr->leaf);
+ zbr->leaf = NULL;
+}
+
+/**
+ * tnc_read_node_nm - read a "hashed" leaf node.
+ * @c: UBIFS file-system description object
+ * @zbr: key and position of the node
+ * @node: node is returned here
+ *
+ * This function reads a "hashed" node defined by @zbr from the leaf node cache
+ * (if it is there) or from the flash media, in which case the node is also
+ * added to LNC. Returns zero in case of success or a negative error code in
+ * case of failure.
+ */
+static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ void *node)
+{
+ int err;
+
+ ubifs_assert(is_hash_key(c, &zbr->key));
+
+ if (zbr->leaf) {
+ /* Read from the leaf node cache */
+ ubifs_assert(zbr->len != 0);
+ memcpy(node, zbr->leaf, zbr->len);
+ return 0;
+ }
+
+ err = ubifs_tnc_read_node(c, zbr, node);
+ if (err)
+ return err;
+
+ /* Add the node to the leaf node cache */
+ err = lnc_add(c, zbr, node);
+ return err;
+}
+
+/**
+ * try_read_node - read a node if it is a node.
+ * @c: UBIFS file-system description object
+ * @buf: buffer to read to
+ * @type: node type
+ * @len: node length (not aligned)
+ * @lnum: LEB number of node to read
+ * @offs: offset of node to read
+ *
+ * This function tries to read a node of known type and length, checks it and
+ * stores it in @buf. This function returns %1 if a node is present and %0 if
+ * a node is not present. A negative error code is returned for I/O errors.
+ * This function performs the same function as ubifs_read_node except that
+ * it does not require that there is actually a node present and instead
+ * the return code indicates if a node was read.
+ *
+ * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
+ * is true (it is controlled by corresponding mount option). However, if
+ * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and CRC is always
+ * checked.
+ */
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+ int len, int lnum, int offs)
+{
+ int err, node_len;
+ struct ubifs_ch *ch = buf;
+ uint32_t crc, node_crc;
+
+ dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
+
+ err = ubi_read(c->ubi, lnum, buf, offs, len);
+ if (err) {
+ ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
+ type, lnum, offs, err);
+ return err;
+ }
+
+ if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC)
+ return 0;
+
+ if (ch->node_type != type)
+ return 0;
+
+ node_len = le32_to_cpu(ch->len);
+ if (node_len != len)
+ return 0;
+
+ if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc)
+ return 1;
+
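+ /*
+ * The CRC in the common header covers everything after the first 8 bytes
+ * of the node, i.e. the magic and crc fields themselves are excluded.
+ */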
+ crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
+ node_crc = le32_to_cpu(ch->crc);
+ if (crc != node_crc)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * fallible_read_node - try to read a leaf node.
+ * @c: UBIFS file-system description object
+ * @key: key of node to read
+ * @zbr: position of node
+ * @node: node returned
+ *
+ * This function tries to read a node and returns %1 if the node is read, %0
+ * if the node is not present, and a negative error code in the case of error.
+ */
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_zbranch *zbr, void *node)
+{
+ int ret;
+
+ dbg_tnc("LEB %d:%d, key %s", zbr->lnum, zbr->offs, DBGKEY(key));
+
+ ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
+ zbr->offs);
+ if (ret == 1) {
+ union ubifs_key node_key;
+ struct ubifs_dent_node *dent = node;
+
+ /* All nodes have key in the same place */
+ key_read(c, &dent->key, &node_key);
+ if (keys_cmp(c, key, &node_key) != 0)
+ ret = 0;
+ }
+ if (ret == 0 && c->replaying)
+ dbg_mnt("dangling branch LEB %d:%d len %d, key %s",
+ zbr->lnum, zbr->offs, zbr->len, DBGKEY(key));
+ return ret;
+}
+
+/**
+ * matches_name - determine if a direntry or xattr entry matches a given name.
+ * @c: UBIFS file-system description object
+ * @zbr: zbranch of dent
+ * @nm: name to match
+ *
+ * This function checks if xentry/direntry referred by zbranch @zbr matches name
+ * @nm. Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by
+ * @zbr is less than @nm, and %NAME_GREATER if it is greater than @nm. In case
+ * of failure, a negative error code is returned.
+ */
+static int matches_name(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ const struct qstr *nm)
+{
+ struct ubifs_dent_node *dent;
+ int nlen, err;
+
+ /* If possible, match against the dent in the leaf node cache */
+ if (!zbr->leaf) {
+ dent = kmalloc(zbr->len, GFP_NOFS);
+ if (!dent)
+ return -ENOMEM;
+
+ err = ubifs_tnc_read_node(c, zbr, dent);
+ if (err)
+ goto out_free;
+
+ /* Add the node to the leaf node cache */
+ err = lnc_add_directly(c, zbr, dent);
+ if (err)
+ goto out_free;
+ } else
+ dent = zbr->leaf;
+
+ nlen = le16_to_cpu(dent->nlen);
+ err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
+ if (err == 0) {
+ if (nlen == nm->len)
+ return NAME_MATCHES;
+ else if (nlen < nm->len)
+ return NAME_LESS;
+ else
+ return NAME_GREATER;
+ } else if (err < 0)
+ return NAME_LESS;
+ else
+ return NAME_GREATER;
+
+out_free:
+ kfree(dent);
+ return err;
+}
+
+/**
+ * get_znode - get a TNC znode that may not be loaded yet.
+ * @c: UBIFS file-system description object
+ * @znode: parent znode
+ * @n: znode branch slot number
+ *
+ * This function returns the znode or a negative error code.
+ */
+static struct ubifs_znode *get_znode(struct ubifs_info *c,
+ struct ubifs_znode *znode, int n)
+{
+ struct ubifs_zbranch *zbr;
+
+ zbr = &znode->zbranch[n];
+ if (zbr->znode)
+ znode = zbr->znode;
+ else
+ znode = ubifs_load_znode(c, zbr, znode, n);
+ return znode;
+}
+
+/**
+ * tnc_next - find next TNC entry.
+ * @c: UBIFS file-system description object
+ * @zn: znode is passed and returned here
+ * @n: znode branch slot number is passed and returned here
+ *
+ * This function returns %0 if the next TNC entry is found, %-ENOENT if there is
+ * no next entry, or a negative error code otherwise.
+ */
+static int tnc_next(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
+{
+ struct ubifs_znode *znode = *zn;
+ int nn = *n;
+
+ nn += 1;
+ if (nn < znode->child_cnt) {
+ *n = nn;
+ return 0;
+ }
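+ /*
+ * Climb up until a next sibling exists, then descend to the leftmost
+ * level-0 znode of that subtree.
+ */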
+ while (1) {
+ struct ubifs_znode *zp;
+
+ zp = znode->parent;
+ if (!zp)
+ return -ENOENT;
+ nn = znode->iip + 1;
+ znode = zp;
+ if (nn < znode->child_cnt) {
+ znode = get_znode(c, znode, nn);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ while (znode->level != 0) {
+ znode = get_znode(c, znode, 0);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+ nn = 0;
+ break;
+ }
+ }
+ *zn = znode;
+ *n = nn;
+ return 0;
+}
+
+/**
+ * tnc_prev - find previous TNC entry.
+ * @c: UBIFS file-system description object
+ * @zn: znode is returned here
+ * @n: znode branch slot number is passed and returned here
+ *
+ * This function returns %0 if the previous TNC entry is found, %-ENOENT if
+ * there is no next entry, or a negative error code otherwise.
+ */
+static int tnc_prev(struct ubifs_info *c, struct ubifs_znode **zn, int *n)
+{
+ struct ubifs_znode *znode = *zn;
+ int nn = *n;
+
+ if (nn > 0) {
+ *n = nn - 1;
+ return 0;
+ }
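+ /*
+ * Climb up until a previous sibling exists, then descend to the
+ * rightmost level-0 znode of that subtree.
+ */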
+ while (1) {
+ struct ubifs_znode *zp;
+
+ zp = znode->parent;
+ if (!zp)
+ return -ENOENT;
+ nn = znode->iip - 1;
+ znode = zp;
+ if (nn >= 0) {
+ znode = get_znode(c, znode, nn);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ while (znode->level != 0) {
+ nn = znode->child_cnt - 1;
+ znode = get_znode(c, znode, nn);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+ nn = znode->child_cnt - 1;
+ break;
+ }
+ }
+ *zn = znode;
+ *n = nn;
+ return 0;
+}
+
+/**
+ * resolve_collision - resolve a collision.
+ * @c: UBIFS file-system description object
+ * @key: key of a directory or extended attribute entry
+ * @zn: znode is returned here
+ * @n: zbranch number is passed and returned here
+ * @nm: name of the entry
+ *
+ * This function is called for "hashed" keys to make sure that the found key
+ * really corresponds to the looked up node (directory or extended attribute
+ * entry). It returns %1 and sets @zn and @n if the collision is resolved.
+ * %0 is returned if @nm is not found and @zn and @n are set to the previous
+ * entry, i.e. to the entry after which @nm could follow if it were in TNC.
+ * This means that @n may be set to %-1 if the leftmost key in @zn is the
+ * previous one. A negative error code is returned on failures.
+ */
+static int resolve_collision(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_znode **zn, int *n,
+ const struct qstr *nm)
+{
+ int err;
+
+ err = matches_name(c, &(*zn)->zbranch[*n], nm);
+ if (unlikely(err < 0))
+ return err;
+ if (err == NAME_MATCHES)
+ return 1;
+
+ if (err == NAME_GREATER) {
+ /* Look left */
+ while (1) {
+ err = tnc_prev(c, zn, n);
+ if (err == -ENOENT) {
+ ubifs_assert(*n == 0);
+ *n = -1;
+ return 0;
+ }
+ if (err < 0)
+ return err;
+ if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
+ /*
+ * We have found the branch after which we would
+ * like to insert, but inserting in this znode
+ * may still be wrong. Consider the following 3
+ * znodes, in the case where we are resolving a
+ * collision with Key2.
+ *
+ * znode zp
+ * ----------------------
+ * level 1 | Key0 | Key1 |
+ * -----------------------
+ * | |
+ * znode za | | znode zb
+ * ------------ ------------
+ * level 0 | Key0 | | Key2 |
+ * ------------ ------------
+ *
+ * The lookup finds Key2 in znode zb. Lets say
+ * there is no match and the name is greater so
+ * we look left. When we find Key0, we end up
+ * here. If we return now, we will insert into
+ * znode za at slot n = 1. But that is invalid
+ * according to the parent's keys. Key2 must
+ * be inserted into znode zb.
+ *
+ * Note, this problem is not relevant for the
+ * case when we go right, because
+ * 'tnc_insert()' would correct the parent key.
+ */
+ if (*n == (*zn)->child_cnt - 1) {
+ err = tnc_next(c, zn, n);
+ if (err) {
+ /* Should be impossible */
+ ubifs_assert(0);
+ if (err == -ENOENT)
+ err = -EINVAL;
+ return err;
+ }
+ ubifs_assert(*n == 0);
+ *n = -1;
+ }
+ return 0;
+ }
+ err = matches_name(c, &(*zn)->zbranch[*n], nm);
+ if (err < 0)
+ return err;
+ if (err == NAME_LESS)
+ return 0;
+ if (err == NAME_MATCHES)
+ return 1;
+ ubifs_assert(err == NAME_GREATER);
+ }
+ } else {
+ int nn = *n;
+ struct ubifs_znode *znode = *zn;
+
+ /* Look right */
+ while (1) {
+ err = tnc_next(c, &znode, &nn);
+ if (err == -ENOENT)
+ return 0;
+ if (err < 0)
+ return err;
+ if (keys_cmp(c, &znode->zbranch[nn].key, key))
+ return 0;
+ err = matches_name(c, &znode->zbranch[nn], nm);
+ if (err < 0)
+ return err;
+ if (err == NAME_GREATER)
+ return 0;
+ *zn = znode;
+ *n = nn;
+ if (err == NAME_MATCHES)
+ return 1;
+ ubifs_assert(err == NAME_LESS);
+ }
+ }
+}
+
+/**
+ * fallible_matches_name - determine if a dent matches a given name.
+ * @c: UBIFS file-system description object
+ * @zbr: zbranch of dent
+ * @nm: name to match
+ *
+ * This is a "fallible" version of 'matches_name()' function which does not
+ * panic if the direntry/xentry referred by @zbr does not exist on the media.
+ *
+ * This function checks if xentry/direntry referred by zbranch @zbr matches name
+ * @nm. Returns %NAME_MATCHES if it does, %NAME_LESS if the name referred by
+ * @zbr is less than @nm, %NAME_GREATER if it is greater than @nm, and %NOT_ON_MEDIA
+ * if xentry/direntry referred by @zbr does not exist on the media. A negative
+ * error code is returned in case of failure.
+ */
+static int fallible_matches_name(struct ubifs_info *c,
+ struct ubifs_zbranch *zbr,
+ const struct qstr *nm)
+{
+ struct ubifs_dent_node *dent;
+ int nlen, err;
+
+ /* If possible, match against the dent in the leaf node cache */
+ if (!zbr->leaf) {
+ dent = kmalloc(zbr->len, GFP_NOFS);
+ if (!dent)
+ return -ENOMEM;
+
+ err = fallible_read_node(c, &zbr->key, zbr, dent);
+ if (err < 0)
+ goto out_free;
+ if (err == 0) {
+ /* The node was not present */
+ err = NOT_ON_MEDIA;
+ goto out_free;
+ }
+ ubifs_assert(err == 1);
+
+ err = lnc_add_directly(c, zbr, dent);
+ if (err)
+ goto out_free;
+ } else
+ dent = zbr->leaf;
+
+ nlen = le16_to_cpu(dent->nlen);
+ err = memcmp(dent->name, nm->name, min_t(int, nlen, nm->len));
+ if (err == 0) {
+ if (nlen == nm->len)
+ return NAME_MATCHES;
+ else if (nlen < nm->len)
+ return NAME_LESS;
+ else
+ return NAME_GREATER;
+ } else if (err < 0)
+ return NAME_LESS;
+ else
+ return NAME_GREATER;
+
+out_free:
+ kfree(dent);
+ return err;
+}
+
+/**
+ * fallible_resolve_collision - resolve a collision even if nodes are missing.
+ * @c: UBIFS file-system description object
+ * @key: key
+ * @zn: znode is returned here
+ * @n: branch number is passed and returned here
+ * @nm: name of directory entry
+ * @adding: indicates caller is adding a key to the TNC
+ *
+ * This is a "fallible" version of the 'resolve_collision()' function which
+ * does not panic if one of the nodes referred to by TNC does not exist on the
+ * media. This may happen when replaying the journal if a deleted node was
+ * Garbage-collected and the commit was not done. A branch that refers to a node
+ * that is not present is called a dangling branch. The following are the return
+ * codes for this function:
+ * o if @nm was found, %1 is returned and @zn and @n are set to the found
+ * branch;
+ * o if we are @adding and @nm was not found, %0 is returned;
+ * o if we are not @adding and @nm was not found, but a dangling branch was
+ * found, then %1 is returned and @zn and @n are set to the dangling branch;
+ * o a negative error code is returned in case of failure.
+ */
+static int fallible_resolve_collision(struct ubifs_info *c,
+ const union ubifs_key *key,
+ struct ubifs_znode **zn, int *n,
+ const struct qstr *nm, int adding)
+{
+ struct ubifs_znode *o_znode = NULL, *znode = *zn;
+ int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n;
+
+ cmp = fallible_matches_name(c, &znode->zbranch[nn], nm);
+ if (unlikely(cmp < 0))
+ return cmp;
+ if (cmp == NAME_MATCHES)
+ return 1;
+ if (cmp == NOT_ON_MEDIA) {
+ o_znode = znode;
+ o_n = nn;
+ /*
+ * We are unlucky and hit a dangling branch straight away.
+ * Now we do not really know where to go to find the needed
+ * branch - to the left or to the right. Well, let's try left.
+ */
+ unsure = 1;
+ } else if (!adding)
+ unsure = 1; /* Remove a dangling branch wherever it is */
+
+ if (cmp == NAME_GREATER || unsure) {
+ /* Look left */
+ while (1) {
+ err = tnc_prev(c, zn, n);
+ if (err == -ENOENT) {
+ ubifs_assert(*n == 0);
+ *n = -1;
+ break;
+ }
+ if (err < 0)
+ return err;
+ if (keys_cmp(c, &(*zn)->zbranch[*n].key, key)) {
+ /* See comments in 'resolve_collision()' */
+ if (*n == (*zn)->child_cnt - 1) {
+ err = tnc_next(c, zn, n);
+ if (err) {
+ /* Should be impossible */
+ ubifs_assert(0);
+ if (err == -ENOENT)
+ err = -EINVAL;
+ return err;
+ }
+ ubifs_assert(*n == 0);
+ *n = -1;
+ }
+ break;
+ }
+ err = fallible_matches_name(c, &(*zn)->zbranch[*n], nm);
+ if (err < 0)
+ return err;
+ if (err == NAME_MATCHES)
+ return 1;
+ if (err == NOT_ON_MEDIA) {
+ o_znode = *zn;
+ o_n = *n;
+ continue;
+ }
+ if (!adding)
+ continue;
+ if (err == NAME_LESS)
+ break;
+ else
+ unsure = 0;
+ }
+ }
+
+ if (cmp == NAME_LESS || unsure) {
+ /* Look right */
+ *zn = znode;
+ *n = nn;
+ while (1) {
+ err = tnc_next(c, &znode, &nn);
+ if (err == -ENOENT)
+ break;
+ if (err < 0)
+ return err;
+ if (keys_cmp(c, &znode->zbranch[nn].key, key))
+ break;
+ err = fallible_matches_name(c, &znode->zbranch[nn], nm);
+ if (err < 0)
+ return err;
+ if (err == NAME_GREATER)
+ break;
+ *zn = znode;
+ *n = nn;
+ if (err == NAME_MATCHES)
+ return 1;
+ if (err == NOT_ON_MEDIA) {
+ o_znode = znode;
+ o_n = nn;
+ }
+ }
+ }
+
+ /* Never match a dangling branch when adding */
+ if (adding || !o_znode)
+ return 0;
+
+ dbg_mnt("dangling match LEB %d:%d len %d %s",
+ o_znode->zbranch[o_n].lnum, o_znode->zbranch[o_n].offs,
+ o_znode->zbranch[o_n].len, DBGKEY(key));
+ *zn = o_znode;
+ *n = o_n;
+ return 1;
+}
+
+/**
+ * matches_position - determine if a zbranch matches a given position.
+ * @zbr: zbranch of dent
+ * @lnum: LEB number of dent to match
+ * @offs: offset of dent to match
+ *
+ * This function returns %1 if @lnum:@offs matches, and %0 otherwise.
+ */
+static int matches_position(struct ubifs_zbranch *zbr, int lnum, int offs)
+{
+ if (zbr->lnum == lnum && zbr->offs == offs)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * resolve_collision_directly - resolve a collision directly.
+ * @c: UBIFS file-system description object
+ * @key: key of directory entry
+ * @zn: znode is passed and returned here
+ * @n: zbranch number is passed and returned here
+ * @lnum: LEB number of dent node to match
+ * @offs: offset of dent node to match
+ *
+ * This function is used for "hashed" keys to make sure the found directory or
+ * extended attribute entry node is what was looked for. It is used when the
+ * flash address of the right node is known (@lnum:@offs) which makes it much
+ * easier to resolve collisions (no need to read entries and match full
+ * names). This function returns %1 and sets @zn and @n if the collision is
+ * resolved, %0 if @lnum:@offs is not found and @zn and @n are set to the
+ * previous directory entry. Otherwise a negative error code is returned.
+ */
+static int resolve_collision_directly(struct ubifs_info *c,
+ const union ubifs_key *key,
+ struct ubifs_znode **zn, int *n,
+ int lnum, int offs)
+{
+ struct ubifs_znode *znode;
+ int nn, err;
+
+ znode = *zn;
+ nn = *n;
+ if (matches_position(&znode->zbranch[nn], lnum, offs))
+ return 1;
+
+ /* Look left */
+ while (1) {
+ err = tnc_prev(c, &znode, &nn);
+ if (err == -ENOENT)
+ break;
+ if (err < 0)
+ return err;
+ if (keys_cmp(c, &znode->zbranch[nn].key, key))
+ break;
+ if (matches_position(&znode->zbranch[nn], lnum, offs)) {
+ *zn = znode;
+ *n = nn;
+ return 1;
+ }
+ }
+
+ /* Look right */
+ znode = *zn;
+ nn = *n;
+ while (1) {
+ err = tnc_next(c, &znode, &nn);
+ if (err == -ENOENT)
+ return 0;
+ if (err < 0)
+ return err;
+ if (keys_cmp(c, &znode->zbranch[nn].key, key))
+ return 0;
+ *zn = znode;
+ *n = nn;
+ if (matches_position(&znode->zbranch[nn], lnum, offs))
+ return 1;
+ }
+}
+
+/**
+ * dirty_cow_bottom_up - dirty a znode and its ancestors.
+ * @c: UBIFS file-system description object
+ * @znode: znode to dirty
+ *
+ * If we do not have a unique key that resides in a znode, then we cannot
+ * dirty that znode from the top down (i.e. by using 'lookup_level0_dirty()').
+ * This function records the path back to the last dirty ancestor, and then
+ * dirties the znodes on that path.
+ */
+static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
+ struct ubifs_znode *znode)
+{
+ struct ubifs_znode *zp;
+ int *path = c->bottom_up_buf, p = 0;
+
+ ubifs_assert(c->zroot.znode);
+ ubifs_assert(znode);
+ if (c->zroot.znode->level > BOTTOM_UP_HEIGHT) {
+ kfree(c->bottom_up_buf);
+ c->bottom_up_buf = kmalloc(c->zroot.znode->level * sizeof(int),
+ GFP_NOFS);
+ if (!c->bottom_up_buf)
+ return ERR_PTR(-ENOMEM);
+ path = c->bottom_up_buf;
+ }
+ if (c->zroot.znode->level) {
+ /* Go up until parent is dirty */
+ while (1) {
+ int n;
+
+ zp = znode->parent;
+ if (!zp)
+ break;
+ n = znode->iip;
+ ubifs_assert(p < c->zroot.znode->level);
+ path[p++] = n;
+ if (!zp->cnext && ubifs_zn_dirty(znode))
+ break;
+ znode = zp;
+ }
+ }
+
+ /* Come back down, dirtying as we go */
+ while (1) {
+ struct ubifs_zbranch *zbr;
+
+ zp = znode->parent;
+ if (zp) {
+ ubifs_assert(path[p - 1] >= 0);
+ ubifs_assert(path[p - 1] < zp->child_cnt);
+ zbr = &zp->zbranch[path[--p]];
+ znode = dirty_cow_znode(c, zbr);
+ } else {
+ ubifs_assert(znode == c->zroot.znode);
+ znode = dirty_cow_znode(c, &c->zroot);
+ }
+ if (IS_ERR(znode) || !p)
+ break;
+ ubifs_assert(path[p - 1] >= 0);
+ ubifs_assert(path[p - 1] < znode->child_cnt);
+ znode = znode->zbranch[path[p - 1]].znode;
+ }
+
+ return znode;
+}
+
+/**
+ * ubifs_lookup_level0 - search for zero-level znode.
+ * @c: UBIFS file-system description object
+ * @key: key to lookup
+ * @zn: znode is returned here
+ * @n: znode branch slot number is returned here
+ *
+ * This function looks up the TNC tree and searches for the zero-level znode
+ * which refers to key @key. The found znode is returned in @zn. There are 3
+ * cases:
+ * o exact match, i.e. the found zero-level znode contains key @key, then %1
+ * is returned and slot number of the matched branch is stored in @n;
+ * o not exact match, which means that zero-level znode does not contain
+ * @key, then %0 is returned and slot number of the closest branch is stored
+ * in @n;
+ * o @key is so small that it is even less than the lowest key of the
+ * leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
+ *
+ * Note, when the TNC tree is traversed, some znodes may be absent, then this
+ * function reads corresponding indexing nodes and inserts them to TNC. In
+ * case of failure, a negative error code is returned.
+ */
+int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_znode **zn, int *n)
+{
+ int err, exact;
+ struct ubifs_znode *znode;
+ unsigned long time = get_seconds();
+
+ dbg_tnc("search key %s", DBGKEY(key));
+
+ znode = c->zroot.znode;
+ if (unlikely(!znode)) {
+ znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+
+ znode->time = time;
+
+ while (1) {
+ struct ubifs_zbranch *zbr;
+
+ exact = ubifs_search_zbranch(c, znode, key, n);
+
+ if (znode->level == 0)
+ break;
+
+ if (*n < 0)
+ *n = 0;
+ zbr = &znode->zbranch[*n];
+
+ if (zbr->znode) {
+ znode->time = time;
+ znode = zbr->znode;
+ continue;
+ }
+
+ /* znode is not in TNC cache, load it from the media */
+ znode = ubifs_load_znode(c, zbr, znode, *n);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+
+ *zn = znode;
+ if (exact || !is_hash_key(c, key) || *n != -1) {
+ dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
+ return exact;
+ }
+
+ /*
+ * Here is a tricky place. We have not found the key and this is a
+ * "hashed" key, which may collide. The rest of the code deals with
+ * situations like this:
+ *
+ * | 3 | 5 |
+ * / \
+ * | 3 | 5 | | 6 | 7 | (x)
+ *
+ * Or a more complex example:
+ *
+ * | 1 | 5 |
+ * / \
+ * | 1 | 3 | | 5 | 8 |
+ * \ /
+ * | 5 | 5 | | 6 | 7 | (x)
+ *
+ * In the examples, if we are looking for key "5", we may reach nodes
+ * marked with "(x)". In this case what we have to do is look to the
+ * left and see if there is a "5" key there. If there is, we have to
+ * return it.
+ *
+ * Note, this whole situation is possible because we allow having
+ * elements in the children of the current znode which are equivalent
+ * to the next key in the parent. For example, this happens if we split a
+ * znode like this: | 3 | 5 | 5 | 6 | 7 |, which results in something
+ * like this:
+ * | 3 | 5 |
+ * / \
+ * | 3 | 5 | | 5 | 6 | 7 |
+ * ^
+ * And this becomes what is shown in the first "picture" after the key "5"
+ * marked with "^" is removed. What could be done is we could prohibit
+ * splitting in the middle of the colliding sequence. Also, when
+ * removing the leftmost key, we would have to correct the key of the
+ * parent node, which would introduce additional complications. Namely,
+ * if we changed the leftmost key of the parent znode, the garbage
+ * collector would be unable to find it (GC is doing this when GC'ing
+ * indexing LEBs). Although we already have an additional RB-tree where
+ * we save such changed znodes (see 'ins_clr_old_idx_znode()') until
+ * after the commit. But anyway, this does not look easy to implement
+ * so we did not try this.
+ */
+ err = tnc_prev(c, &znode, n);
+ if (err == -ENOENT) {
+ dbg_tnc("found 0, lvl %d, n -1", znode->level);
+ *n = -1;
+ return 0;
+ }
+ if (unlikely(err < 0))
+ return err;
+ if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
+ dbg_tnc("found 0, lvl %d, n -1", znode->level);
+ *n = -1;
+ return 0;
+ }
+
+ dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
+ *zn = znode;
+ return 1;
+}
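+
+/*
+ * Illustrative sketch of the three cases above, using toy integer keys
+ * (real keys are 'union ubifs_key' objects): if a zero-level znode holds
+ * keys [10, 20, 30], then looking up 20 returns %1 with @n set to slot 1,
+ * looking up 25 returns %0 with @n set to slot 1 (the closest smaller key),
+ * and looking up 5 returns %0 with @n set to %-1.
+ */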
+
+/**
+ * lookup_level0_dirty - search for zero-level znode dirtying.
+ * @c: UBIFS file-system description object
+ * @key: key to lookup
+ * @zn: znode is returned here
+ * @n: znode branch slot number is returned here
+ *
+ * This function looks up the TNC tree and searches for the zero-level znode
+ * which refers to key @key. The found znode is returned in @zn. There are 3
+ * cases:
+ * o exact match, i.e. the found zero-level znode contains key @key, then %1
+ * is returned and slot number of the matched branch is stored in @n;
+ * o not exact match, which means that zero-level znode does not contain @key,
+ * then %0 is returned and slot number of the closest branch is stored in
+ * @n;
+ * o @key is so small that it is even less than the lowest key of the
+ * leftmost zero-level node, then %0 is returned and %-1 is stored in @n.
+ *
+ * Additionally all znodes in the path from the root to the located zero-level
+ * znode are marked as dirty.
+ *
+ * Note, when the TNC tree is traversed, some znodes may be absent, then this
+ * function reads corresponding indexing nodes and inserts them to TNC. In
+ * case of failure, a negative error code is returned.
+ */
+static int lookup_level0_dirty(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_znode **zn, int *n)
+{
+ int err, exact;
+ struct ubifs_znode *znode;
+ unsigned long time = get_seconds();
+
+ dbg_tnc("search and dirty key %s", DBGKEY(key));
+
+ znode = c->zroot.znode;
+ if (unlikely(!znode)) {
+ znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+
+ znode = dirty_cow_znode(c, &c->zroot);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+
+ znode->time = time;
+
+ while (1) {
+ struct ubifs_zbranch *zbr;
+
+ exact = ubifs_search_zbranch(c, znode, key, n);
+
+ if (znode->level == 0)
+ break;
+
+ if (*n < 0)
+ *n = 0;
+ zbr = &znode->zbranch[*n];
+
+ if (zbr->znode) {
+ znode->time = time;
+ znode = dirty_cow_znode(c, zbr);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ continue;
+ }
+
+ /* znode is not in TNC cache, load it from the media */
+ znode = ubifs_load_znode(c, zbr, znode, *n);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ znode = dirty_cow_znode(c, zbr);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+
+ *zn = znode;
+ if (exact || !is_hash_key(c, key) || *n != -1) {
+ dbg_tnc("found %d, lvl %d, n %d", exact, znode->level, *n);
+ return exact;
+ }
+
+ /*
+ * See the huge comment in 'ubifs_lookup_level0()' for what the rest of
+ * the code does.
+ */
+ err = tnc_prev(c, &znode, n);
+ if (err == -ENOENT) {
+ *n = -1;
+ dbg_tnc("found 0, lvl %d, n -1", znode->level);
+ return 0;
+ }
+ if (unlikely(err < 0))
+ return err;
+ if (keys_cmp(c, key, &znode->zbranch[*n].key)) {
+ *n = -1;
+ dbg_tnc("found 0, lvl %d, n -1", znode->level);
+ return 0;
+ }
+
+ if (znode->cnext || !ubifs_zn_dirty(znode)) {
+ znode = dirty_cow_bottom_up(c, znode);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ }
+
+ dbg_tnc("found 1, lvl %d, n %d", znode->level, *n);
+ *zn = znode;
+ return 1;
+}
+
+/**
+ * maybe_leb_gced - determine if a LEB may have been garbage collected.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number
+ * @gc_seq1: garbage collection sequence number
+ *
+ * This function determines if @lnum may have been garbage collected since
+ * sequence number @gc_seq1. If it may have been then %1 is returned, otherwise
+ * %0 is returned.
+ */
+static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
+{
+ /*
+ * No garbage collection in the read-only U-Boot implementation
+ */
+ return 0;
+}
+
+/**
+ * ubifs_tnc_locate - look up a file-system node and return it and its location.
+ * @c: UBIFS file-system description object
+ * @key: node key to lookup
+ * @node: the node is returned here
+ * @lnum: LEB number is returned here
+ * @offs: offset is returned here
+ *
+ * This function looks up and reads the node with key @key. The caller has to make
+ * sure the @node buffer is large enough to fit the node. Returns zero in case
+ * of success, %-ENOENT if the node was not found, and a negative error code in
+ * case of failure. The node location can be returned in @lnum and @offs.
+ */
+int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
+ void *node, int *lnum, int *offs)
+{
+ int found, n, err, safely = 0, gc_seq1;
+ struct ubifs_znode *znode;
+ struct ubifs_zbranch zbr, *zt;
+
+again:
+ mutex_lock(&c->tnc_mutex);
+ found = ubifs_lookup_level0(c, key, &znode, &n);
+ if (!found) {
+ err = -ENOENT;
+ goto out;
+ } else if (found < 0) {
+ err = found;
+ goto out;
+ }
+ zt = &znode->zbranch[n];
+ if (lnum) {
+ *lnum = zt->lnum;
+ *offs = zt->offs;
+ }
+ if (is_hash_key(c, key)) {
+ /*
+ * In this case the leaf node cache gets used, so we pass the
+ * address of the zbranch and keep the mutex locked
+ */
+ err = tnc_read_node_nm(c, zt, node);
+ goto out;
+ }
+ if (safely) {
+ err = ubifs_tnc_read_node(c, zt, node);
+ goto out;
+ }
+ /* Drop the TNC mutex prematurely and race with garbage collection */
+ zbr = znode->zbranch[n];
+ gc_seq1 = c->gc_seq;
+ mutex_unlock(&c->tnc_mutex);
+
+ err = fallible_read_node(c, key, &zbr, node);
+ if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) {
+ /*
+ * The node may have been GC'ed out from under us so try again
+ * while keeping the TNC mutex locked.
+ */
+ safely = 1;
+ goto again;
+ }
+ return 0;
+
+out:
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
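+
+/*
+ * Illustrative usage sketch (hypothetical @inum and @block values; assumes
+ * a 'struct ubifs_data_node *dn' buffer large enough for a data node and
+ * the 'data_key_init()' helper from key.h):
+ *
+ *	union ubifs_key key;
+ *	int lnum, offs, err;
+ *
+ *	data_key_init(c, &key, inum, block);
+ *	err = ubifs_tnc_locate(c, &key, dn, &lnum, &offs);
+ *
+ * A %-ENOENT return means there is no data node for that block.
+ */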
+
+/**
+ * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * Look up consecutive data node keys for the same inode that reside
+ * consecutively in the same LEB. This function returns zero in case of success
+ * and a negative error code in case of failure.
+ *
+ * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
+ * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
+ * the maximum possible number of nodes for bulk-read.
+ */
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
+{
+ int n, err = 0, lnum = -1, uninitialized_var(offs);
+ int uninitialized_var(len);
+ unsigned int block = key_block(c, &bu->key);
+ struct ubifs_znode *znode;
+
+ bu->cnt = 0;
+ bu->blk_cnt = 0;
+ bu->eof = 0;
+
+ mutex_lock(&c->tnc_mutex);
+ /* Find first key */
+ err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
+ if (err < 0)
+ goto out;
+ if (err) {
+ /* Key found */
+ len = znode->zbranch[n].len;
+ /* The buffer must be big enough for at least 1 node */
+ if (len > bu->buf_len) {
+ err = -EINVAL;
+ goto out;
+ }
+ /* Add this key */
+ bu->zbranch[bu->cnt++] = znode->zbranch[n];
+ bu->blk_cnt += 1;
+ lnum = znode->zbranch[n].lnum;
+ offs = ALIGN(znode->zbranch[n].offs + len, 8);
+ }
+ while (1) {
+ struct ubifs_zbranch *zbr;
+ union ubifs_key *key;
+ unsigned int next_block;
+
+ /* Find next key */
+ err = tnc_next(c, &znode, &n);
+ if (err)
+ goto out;
+ zbr = &znode->zbranch[n];
+ key = &zbr->key;
+ /* See if there is another data key for this file */
+ if (key_inum(c, key) != key_inum(c, &bu->key) ||
+ key_type(c, key) != UBIFS_DATA_KEY) {
+ err = -ENOENT;
+ goto out;
+ }
+ if (lnum < 0) {
+ /* First key found */
+ lnum = zbr->lnum;
+ offs = ALIGN(zbr->offs + zbr->len, 8);
+ len = zbr->len;
+ if (len > bu->buf_len) {
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ /*
+ * The data nodes must be in consecutive positions in
+ * the same LEB.
+ */
+ if (zbr->lnum != lnum || zbr->offs != offs)
+ goto out;
+ offs += ALIGN(zbr->len, 8);
+ len = ALIGN(len, 8) + zbr->len;
+ /* Must not exceed buffer length */
+ if (len > bu->buf_len)
+ goto out;
+ }
+ /* Allow for holes */
+ next_block = key_block(c, key);
+ bu->blk_cnt += (next_block - block - 1);
+ if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+ goto out;
+ block = next_block;
+ /* Add this key */
+ bu->zbranch[bu->cnt++] = *zbr;
+ bu->blk_cnt += 1;
+ /* See if we have room for more */
+ if (bu->cnt >= UBIFS_MAX_BULK_READ)
+ goto out;
+ if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
+ goto out;
+ }
+out:
+ if (err == -ENOENT) {
+ bu->eof = 1;
+ err = 0;
+ }
+ bu->gc_seq = c->gc_seq;
+ mutex_unlock(&c->tnc_mutex);
+ if (err)
+ return err;
+ /*
+ * An enormous hole could cause bulk-read to encompass too many
+ * page cache pages, so limit the number here.
+ */
+ if (bu->blk_cnt > UBIFS_MAX_BULK_READ)
+ bu->blk_cnt = UBIFS_MAX_BULK_READ;
+ /*
+ * Ensure that bulk-read covers a whole number of page cache
+ * pages.
+ */
+ if (UBIFS_BLOCKS_PER_PAGE == 1 ||
+ !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
+ return 0;
+ if (bu->eof) {
+ /* At the end of file we can round up */
+ bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
+ return 0;
+ }
+ /* Exclude data nodes that do not make up a whole page cache page */
+ block = key_block(c, &bu->key) + bu->blk_cnt;
+ block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
+ while (bu->cnt) {
+ if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
+ break;
+ bu->cnt -= 1;
+ }
+ return 0;
+}
+
+/**
+ * validate_data_node - validate data nodes for bulk-read.
+ * @c: UBIFS file-system description object
+ * @buf: buffer containing data node to validate
+ * @zbr: zbranch of data node to validate
+ *
+ * This function returns %0 on success or a negative error code on failure.
+ */
+static int validate_data_node(struct ubifs_info *c, void *buf,
+ struct ubifs_zbranch *zbr)
+{
+ union ubifs_key key1;
+ struct ubifs_ch *ch = buf;
+ int err, len;
+
+ if (ch->node_type != UBIFS_DATA_NODE) {
+ ubifs_err("bad node type (%d but expected %d)",
+ ch->node_type, UBIFS_DATA_NODE);
+ goto out_err;
+ }
+
+ err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0, 0);
+ if (err) {
+ ubifs_err("expected node type %d", UBIFS_DATA_NODE);
+ goto out;
+ }
+
+ len = le32_to_cpu(ch->len);
+ if (len != zbr->len) {
+ ubifs_err("bad node length %d, expected %d", len, zbr->len);
+ goto out_err;
+ }
+
+ /* Make sure the key of the read node is correct */
+ key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
+ if (!keys_eq(c, &zbr->key, &key1)) {
+ ubifs_err("bad key in node at LEB %d:%d",
+ zbr->lnum, zbr->offs);
+ dbg_tnc("looked for key %s found node's key %s",
+ DBGKEY(&zbr->key), DBGKEY1(&key1));
+ goto out_err;
+ }
+
+ return 0;
+
+out_err:
+ err = -EINVAL;
+out:
+ ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
+ dbg_dump_node(c, buf);
+ dbg_dump_stack();
+ return err;
+}
+
+/**
+ * ubifs_tnc_bulk_read - read a number of data nodes in one go.
+ * @c: UBIFS file-system description object
+ * @bu: bulk-read parameters and results
+ *
+ * This function reads and validates the data nodes that were identified by the
+ * 'ubifs_tnc_get_bu_keys()' function. This function returns %0 on success,
+ * -EAGAIN to indicate a race with GC, or another negative error code on
+ * failure.
+ */
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
+{
+ int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
+ void *buf;
+
+ len = bu->zbranch[bu->cnt - 1].offs;
+ len += bu->zbranch[bu->cnt - 1].len - offs;
+ if (len > bu->buf_len) {
+ ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
+ return -EINVAL;
+ }
+
+ /* Do the read */
+ err = ubi_read(c->ubi, lnum, bu->buf, offs, len);
+
+ /* Check for a race with GC */
+ if (maybe_leb_gced(c, lnum, bu->gc_seq))
+ return -EAGAIN;
+
+ if (err && err != -EBADMSG) {
+ ubifs_err("failed to read from LEB %d:%d, error %d",
+ lnum, offs, err);
+ dbg_dump_stack();
+ dbg_tnc("key %s", DBGKEY(&bu->key));
+ return err;
+ }
+
+ /* Validate the nodes read */
+ buf = bu->buf;
+ for (i = 0; i < bu->cnt; i++) {
+ err = validate_data_node(c, buf, &bu->zbranch[i]);
+ if (err)
+ return err;
+ buf = buf + ALIGN(bu->zbranch[i].len, 8);
+ }
+
+ return 0;
+}
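+
+/*
+ * Illustrative bulk-read sequence (a sketch, not code used by this file).
+ * Only the call order and the %-EAGAIN contract come from the two functions
+ * above; the bu_info setup and the fallback policy are assumptions:
+ *
+ *	data_key_init(c, &bu->key, inum, first_block);
+ *	bu->buf_len = buf_len;
+ *	err = ubifs_tnc_get_bu_keys(c, bu);
+ *	if (err)
+ *		return err;
+ *	if (bu->cnt) {
+ *		bu->buf = buf;      (a buffer of at least bu->buf_len bytes)
+ *		err = ubifs_tnc_bulk_read(c, bu);
+ *		if (err == -EAGAIN)
+ *			... retry or fall back to single-node reads ...
+ *	}
+ */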
+
+/**
+ * do_lookup_nm - look up a "hashed" node.
+ * @c: UBIFS file-system description object
+ * @key: node key to lookup
+ * @node: the node is returned here
+ * @nm: node name
+ *
+ * This function looks up and reads a node which contains a name hash in the key.
+ * Since the hash may have collisions, there may be many nodes with the same
+ * key, so we have to look at all of them sequentially until the needed one is
+ * found. This function returns zero in case of success, %-ENOENT if the node
+ * was not found, and a negative error code in case of failure.
+ */
+static int do_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
+ void *node, const struct qstr *nm)
+{
+ int found, n, err;
+ struct ubifs_znode *znode;
+
+ dbg_tnc("name '%.*s' key %s", nm->len, nm->name, DBGKEY(key));
+ mutex_lock(&c->tnc_mutex);
+ found = ubifs_lookup_level0(c, key, &znode, &n);
+ if (!found) {
+ err = -ENOENT;
+ goto out_unlock;
+ } else if (found < 0) {
+ err = found;
+ goto out_unlock;
+ }
+
+ ubifs_assert(n >= 0);
+
+ err = resolve_collision(c, key, &znode, &n, nm);
+ dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
+ if (unlikely(err < 0))
+ goto out_unlock;
+ if (err == 0) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
+
+ err = tnc_read_node_nm(c, &znode->zbranch[n], node);
+
+out_unlock:
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
+ * ubifs_tnc_lookup_nm - look up a "hashed" node.
+ * @c: UBIFS file-system description object
+ * @key: node key to lookup
+ * @node: the node is returned here
+ * @nm: node name
+ *
+ * This function looks up and reads a node which contains a name hash in the key.
+ * Since the hash may have collisions, there may be many nodes with the same
+ * key, so we have to look at all of them sequentially until the needed one is
+ * found. This function returns zero in case of success, %-ENOENT if the node
+ * was not found, and a negative error code in case of failure.
+ */
+int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
+ void *node, const struct qstr *nm)
+{
+ int err, len;
+ const struct ubifs_dent_node *dent = node;
+
+ /*
+ * We assume that in most of the cases there are no name collisions and
+ * 'ubifs_tnc_lookup()' returns us the right direntry.
+ */
+ err = ubifs_tnc_lookup(c, key, node);
+ if (err)
+ return err;
+
+ len = le16_to_cpu(dent->nlen);
+ if (nm->len == len && !memcmp(dent->name, nm->name, len))
+ return 0;
+
+ /*
+	 * Unluckily, there are hash collisions, so we have to iterate over
+	 * them and look at each direntry with a colliding name hash sequentially.
+ */
+ return do_lookup_nm(c, key, node, nm);
+}
+
+/**
+ * correct_parent_keys - correct parent znodes' keys.
+ * @c: UBIFS file-system description object
+ * @znode: znode to correct parent znodes for
+ *
+ * This is a helper function for 'tnc_insert()'. When the key of the leftmost
+ * zbranch changes, keys of parent znodes have to be corrected. This helper
+ * function is called in such situations and corrects the keys if needed.
+ */
+static void correct_parent_keys(const struct ubifs_info *c,
+ struct ubifs_znode *znode)
+{
+ union ubifs_key *key, *key1;
+
+ ubifs_assert(znode->parent);
+ ubifs_assert(znode->iip == 0);
+
+ key = &znode->zbranch[0].key;
+ key1 = &znode->parent->zbranch[0].key;
+
+ while (keys_cmp(c, key, key1) < 0) {
+ key_copy(c, key, key1);
+ znode = znode->parent;
+ znode->alt = 1;
+ if (!znode->parent || znode->iip)
+ break;
+ key1 = &znode->parent->zbranch[0].key;
+ }
+}
+
+/**
+ * insert_zbranch - insert a zbranch into a znode.
+ * @znode: znode into which to insert
+ * @zbr: zbranch to insert
+ * @n: slot number to insert to
+ *
+ * This is a helper function for 'tnc_insert()'. UBIFS does not allow "gaps" in
+ * znode's array of zbranches and keeps zbranches consolidated, so when a new
+ * zbranch has to be inserted into the @znode->zbranches[] array at the @n-th
+ * slot, zbranches starting from @n have to be moved right.
+ */
+static void insert_zbranch(struct ubifs_znode *znode,
+ const struct ubifs_zbranch *zbr, int n)
+{
+ int i;
+
+ ubifs_assert(ubifs_zn_dirty(znode));
+
+ if (znode->level) {
+ for (i = znode->child_cnt; i > n; i--) {
+ znode->zbranch[i] = znode->zbranch[i - 1];
+ if (znode->zbranch[i].znode)
+ znode->zbranch[i].znode->iip = i;
+ }
+ if (zbr->znode)
+ zbr->znode->iip = n;
+ } else
+ for (i = znode->child_cnt; i > n; i--)
+ znode->zbranch[i] = znode->zbranch[i - 1];
+
+ znode->zbranch[n] = *zbr;
+ znode->child_cnt += 1;
+
+ /*
+ * After inserting at slot zero, the lower bound of the key range of
+ * this znode may have changed. If this znode is subsequently split
+ * then the upper bound of the key range may change, and furthermore
+ * it could change to be lower than the original lower bound. If that
+ * happens, then it will no longer be possible to find this znode in the
+ * TNC using the key from the index node on flash. That is bad because
+ * if it is not found, we will assume it is obsolete and may overwrite
+ * it. Then if there is an unclean unmount, we will start using the
+ * old index which will be broken.
+ *
+ * So we first mark znodes that have insertions at slot zero, and then
+ * if they are split we add their lnum/offs to the old_idx tree.
+ */
+ if (n == 0)
+ znode->alt = 1;
+}
+
+/**
+ * tnc_insert - insert a node into TNC.
+ * @c: UBIFS file-system description object
+ * @znode: znode to insert into
+ * @zbr: branch to insert
+ * @n: slot number to insert new zbranch to
+ *
+ * This function inserts a new node described by @zbr into znode @znode. If
+ * znode does not have a free slot for the new zbranch, it is split. Parent znodes
+ * are split as well if needed. Returns zero in case of success or a negative
+ * error code in case of failure.
+ */
+static int tnc_insert(struct ubifs_info *c, struct ubifs_znode *znode,
+ struct ubifs_zbranch *zbr, int n)
+{
+ struct ubifs_znode *zn, *zi, *zp;
+ int i, keep, move, appending = 0;
+ union ubifs_key *key = &zbr->key, *key1;
+
+ ubifs_assert(n >= 0 && n <= c->fanout);
+
+ /* Implement naive insert for now */
+again:
+ zp = znode->parent;
+ if (znode->child_cnt < c->fanout) {
+ ubifs_assert(n != c->fanout);
+ dbg_tnc("inserted at %d level %d, key %s", n, znode->level,
+ DBGKEY(key));
+
+ insert_zbranch(znode, zbr, n);
+
+ /* Ensure parent's key is correct */
+ if (n == 0 && zp && znode->iip == 0)
+ correct_parent_keys(c, znode);
+
+ return 0;
+ }
+
+ /*
+ * Unfortunately, @znode does not have more empty slots and we have to
+ * split it.
+ */
+ dbg_tnc("splitting level %d, key %s", znode->level, DBGKEY(key));
+
+ if (znode->alt)
+ /*
+ * We can no longer be sure of finding this znode by key, so we
+ * record it in the old_idx tree.
+ */
+ ins_clr_old_idx_znode(c, znode);
+
+ zn = kzalloc(c->max_znode_sz, GFP_NOFS);
+ if (!zn)
+ return -ENOMEM;
+ zn->parent = zp;
+ zn->level = znode->level;
+
+ /* Decide where to split */
+ if (znode->level == 0 && key_type(c, key) == UBIFS_DATA_KEY) {
+ /* Try not to split consecutive data keys */
+ if (n == c->fanout) {
+ key1 = &znode->zbranch[n - 1].key;
+ if (key_inum(c, key1) == key_inum(c, key) &&
+ key_type(c, key1) == UBIFS_DATA_KEY)
+ appending = 1;
+ } else
+ goto check_split;
+ } else if (appending && n != c->fanout) {
+ /* Try not to split consecutive data keys */
+ appending = 0;
+check_split:
+ if (n >= (c->fanout + 1) / 2) {
+ key1 = &znode->zbranch[0].key;
+ if (key_inum(c, key1) == key_inum(c, key) &&
+ key_type(c, key1) == UBIFS_DATA_KEY) {
+ key1 = &znode->zbranch[n].key;
+ if (key_inum(c, key1) != key_inum(c, key) ||
+ key_type(c, key1) != UBIFS_DATA_KEY) {
+ keep = n;
+ move = c->fanout - keep;
+ zi = znode;
+ goto do_split;
+ }
+ }
+ }
+ }
+
+ if (appending) {
+ keep = c->fanout;
+ move = 0;
+ } else {
+ keep = (c->fanout + 1) / 2;
+ move = c->fanout - keep;
+ }
+
+ /*
+ * Although we don't at present, we could look at the neighbors and see
+ * if we can move some zbranches there.
+ */
+
+ if (n < keep) {
+ /* Insert into existing znode */
+ zi = znode;
+ move += 1;
+ keep -= 1;
+ } else {
+ /* Insert into new znode */
+ zi = zn;
+ n -= keep;
+ /* Re-parent */
+ if (zn->level != 0)
+ zbr->znode->parent = zn;
+ }
+
+do_split:
+
+ __set_bit(DIRTY_ZNODE, &zn->flags);
+ atomic_long_inc(&c->dirty_zn_cnt);
+
+ zn->child_cnt = move;
+ znode->child_cnt = keep;
+
+ dbg_tnc("moving %d, keeping %d", move, keep);
+
+ /* Move zbranch */
+ for (i = 0; i < move; i++) {
+ zn->zbranch[i] = znode->zbranch[keep + i];
+ /* Re-parent */
+ if (zn->level != 0)
+ if (zn->zbranch[i].znode) {
+ zn->zbranch[i].znode->parent = zn;
+ zn->zbranch[i].znode->iip = i;
+ }
+ }
+
+ /* Insert new key and branch */
+ dbg_tnc("inserting at %d level %d, key %s", n, zn->level, DBGKEY(key));
+
+ insert_zbranch(zi, zbr, n);
+
+	/* Insert new znode (produced by splitting) into the parent */
+ if (zp) {
+ if (n == 0 && zi == znode && znode->iip == 0)
+ correct_parent_keys(c, znode);
+
+ /* Locate insertion point */
+ n = znode->iip + 1;
+
+ /* Tail recursion */
+ zbr->key = zn->zbranch[0].key;
+ zbr->znode = zn;
+ zbr->lnum = 0;
+ zbr->offs = 0;
+ zbr->len = 0;
+ znode = zp;
+
+ goto again;
+ }
+
+ /* We have to split root znode */
+ dbg_tnc("creating new zroot at level %d", znode->level + 1);
+
+ zi = kzalloc(c->max_znode_sz, GFP_NOFS);
+ if (!zi)
+ return -ENOMEM;
+
+ zi->child_cnt = 2;
+ zi->level = znode->level + 1;
+
+ __set_bit(DIRTY_ZNODE, &zi->flags);
+ atomic_long_inc(&c->dirty_zn_cnt);
+
+ zi->zbranch[0].key = znode->zbranch[0].key;
+ zi->zbranch[0].znode = znode;
+ zi->zbranch[0].lnum = c->zroot.lnum;
+ zi->zbranch[0].offs = c->zroot.offs;
+ zi->zbranch[0].len = c->zroot.len;
+ zi->zbranch[1].key = zn->zbranch[0].key;
+ zi->zbranch[1].znode = zn;
+
+ c->zroot.lnum = 0;
+ c->zroot.offs = 0;
+ c->zroot.len = 0;
+ c->zroot.znode = zi;
+
+ zn->parent = zi;
+ zn->iip = 1;
+ znode->parent = zi;
+ znode->iip = 0;
+
+ return 0;
+}
+
+/**
+ * ubifs_tnc_add - add a node to TNC.
+ * @c: UBIFS file-system description object
+ * @key: key to add
+ * @lnum: LEB number of node
+ * @offs: node offset
+ * @len: node length
+ *
+ * This function adds a node with key @key to TNC. The node may be new or it may
+ * obsolete some existing one. Returns %0 on success or negative error code on
+ * failure.
+ */
+int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
+ int offs, int len)
+{
+ int found, n, err = 0;
+ struct ubifs_znode *znode;
+
+ mutex_lock(&c->tnc_mutex);
+ dbg_tnc("%d:%d, len %d, key %s", lnum, offs, len, DBGKEY(key));
+ found = lookup_level0_dirty(c, key, &znode, &n);
+ if (!found) {
+ struct ubifs_zbranch zbr;
+
+ zbr.znode = NULL;
+ zbr.lnum = lnum;
+ zbr.offs = offs;
+ zbr.len = len;
+ key_copy(c, key, &zbr.key);
+ err = tnc_insert(c, znode, &zbr, n + 1);
+ } else if (found == 1) {
+ struct ubifs_zbranch *zbr = &znode->zbranch[n];
+
+ lnc_free(zbr);
+ err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
+ zbr->lnum = lnum;
+ zbr->offs = offs;
+ zbr->len = len;
+ } else
+ err = found;
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+ mutex_unlock(&c->tnc_mutex);
+
+ return err;
+}
+
+/**
+ * ubifs_tnc_replace - replace a node in the TNC only if the old node is found.
+ * @c: UBIFS file-system description object
+ * @key: key to add
+ * @old_lnum: LEB number of old node
+ * @old_offs: old node offset
+ * @lnum: LEB number of node
+ * @offs: node offset
+ * @len: node length
+ *
+ * This function replaces a node with key @key in the TNC only if the old node
+ * is found. This function is called by garbage collection when nodes are moved.
+ * Returns %0 on success or negative error code on failure.
+ */
+int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
+ int old_lnum, int old_offs, int lnum, int offs, int len)
+{
+ int found, n, err = 0;
+ struct ubifs_znode *znode;
+
+ mutex_lock(&c->tnc_mutex);
+ dbg_tnc("old LEB %d:%d, new LEB %d:%d, len %d, key %s", old_lnum,
+ old_offs, lnum, offs, len, DBGKEY(key));
+ found = lookup_level0_dirty(c, key, &znode, &n);
+ if (found < 0) {
+ err = found;
+ goto out_unlock;
+ }
+
+ if (found == 1) {
+ struct ubifs_zbranch *zbr = &znode->zbranch[n];
+
+ found = 0;
+ if (zbr->lnum == old_lnum && zbr->offs == old_offs) {
+ lnc_free(zbr);
+ err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
+ if (err)
+ goto out_unlock;
+ zbr->lnum = lnum;
+ zbr->offs = offs;
+ zbr->len = len;
+ found = 1;
+ } else if (is_hash_key(c, key)) {
+ found = resolve_collision_directly(c, key, &znode, &n,
+ old_lnum, old_offs);
+ dbg_tnc("rc returned %d, znode %p, n %d, LEB %d:%d",
+ found, znode, n, old_lnum, old_offs);
+ if (found < 0) {
+ err = found;
+ goto out_unlock;
+ }
+
+ if (found) {
+ /* Ensure the znode is dirtied */
+ if (znode->cnext || !ubifs_zn_dirty(znode)) {
+ znode = dirty_cow_bottom_up(c, znode);
+ if (IS_ERR(znode)) {
+ err = PTR_ERR(znode);
+ goto out_unlock;
+ }
+ }
+ zbr = &znode->zbranch[n];
+ lnc_free(zbr);
+ err = ubifs_add_dirt(c, zbr->lnum,
+ zbr->len);
+ if (err)
+ goto out_unlock;
+ zbr->lnum = lnum;
+ zbr->offs = offs;
+ zbr->len = len;
+ }
+ }
+ }
+
+ if (!found)
+ err = ubifs_add_dirt(c, lnum, len);
+
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+
+out_unlock:
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
+ * ubifs_tnc_add_nm - add a "hashed" node to TNC.
+ * @c: UBIFS file-system description object
+ * @key: key to add
+ * @lnum: LEB number of node
+ * @offs: node offset
+ * @len: node length
+ * @nm: node name
+ *
+ * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
+ * may have collisions, like directory entry keys.
+ */
+int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
+ int lnum, int offs, int len, const struct qstr *nm)
+{
+ int found, n, err = 0;
+ struct ubifs_znode *znode;
+
+ mutex_lock(&c->tnc_mutex);
+ dbg_tnc("LEB %d:%d, name '%.*s', key %s", lnum, offs, nm->len, nm->name,
+ DBGKEY(key));
+ found = lookup_level0_dirty(c, key, &znode, &n);
+ if (found < 0) {
+ err = found;
+ goto out_unlock;
+ }
+
+ if (found == 1) {
+ if (c->replaying)
+ found = fallible_resolve_collision(c, key, &znode, &n,
+ nm, 1);
+ else
+ found = resolve_collision(c, key, &znode, &n, nm);
+ dbg_tnc("rc returned %d, znode %p, n %d", found, znode, n);
+ if (found < 0) {
+ err = found;
+ goto out_unlock;
+ }
+
+ /* Ensure the znode is dirtied */
+ if (znode->cnext || !ubifs_zn_dirty(znode)) {
+ znode = dirty_cow_bottom_up(c, znode);
+ if (IS_ERR(znode)) {
+ err = PTR_ERR(znode);
+ goto out_unlock;
+ }
+ }
+
+ if (found == 1) {
+ struct ubifs_zbranch *zbr = &znode->zbranch[n];
+
+ lnc_free(zbr);
+ err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
+ zbr->lnum = lnum;
+ zbr->offs = offs;
+ zbr->len = len;
+ goto out_unlock;
+ }
+ }
+
+ if (!found) {
+ struct ubifs_zbranch zbr;
+
+ zbr.znode = NULL;
+ zbr.lnum = lnum;
+ zbr.offs = offs;
+ zbr.len = len;
+ key_copy(c, key, &zbr.key);
+ err = tnc_insert(c, znode, &zbr, n + 1);
+ if (err)
+ goto out_unlock;
+ if (c->replaying) {
+ /*
+ * We did not find it in the index so there may be a
+ * dangling branch still in the index. So we remove it
+ * by passing 'ubifs_tnc_remove_nm()' the same key but
+ * an unmatchable name.
+ */
+ struct qstr noname = { .len = 0, .name = "" };
+
+ err = dbg_check_tnc(c, 0);
+ mutex_unlock(&c->tnc_mutex);
+ if (err)
+ return err;
+ return ubifs_tnc_remove_nm(c, key, &noname);
+ }
+ }
+
+out_unlock:
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
+ * tnc_delete - delete a znode from TNC.
+ * @c: UBIFS file-system description object
+ * @znode: znode to delete from
+ * @n: zbranch slot number to delete
+ *
+ * This function deletes a leaf node from @n-th slot of @znode. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
+{
+ struct ubifs_zbranch *zbr;
+ struct ubifs_znode *zp;
+ int i, err;
+
+ /* Delete without merge for now */
+ ubifs_assert(znode->level == 0);
+ ubifs_assert(n >= 0 && n < c->fanout);
+ dbg_tnc("deleting %s", DBGKEY(&znode->zbranch[n].key));
+
+ zbr = &znode->zbranch[n];
+ lnc_free(zbr);
+
+ err = ubifs_add_dirt(c, zbr->lnum, zbr->len);
+ if (err) {
+ dbg_dump_znode(c, znode);
+ return err;
+ }
+
+ /* We do not "gap" zbranch slots */
+ for (i = n; i < znode->child_cnt - 1; i++)
+ znode->zbranch[i] = znode->zbranch[i + 1];
+ znode->child_cnt -= 1;
+
+ if (znode->child_cnt > 0)
+ return 0;
+
+ /*
+ * This was the last zbranch, we have to delete this znode from the
+ * parent.
+ */
+
+ do {
+ ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
+ ubifs_assert(ubifs_zn_dirty(znode));
+
+ zp = znode->parent;
+ n = znode->iip;
+
+ atomic_long_dec(&c->dirty_zn_cnt);
+
+ err = insert_old_idx_znode(c, znode);
+ if (err)
+ return err;
+
+ if (znode->cnext) {
+ __set_bit(OBSOLETE_ZNODE, &znode->flags);
+ atomic_long_inc(&c->clean_zn_cnt);
+ atomic_long_inc(&ubifs_clean_zn_cnt);
+ } else
+ kfree(znode);
+ znode = zp;
+ } while (znode->child_cnt == 1); /* while removing last child */
+
+ /* Remove from znode, entry n - 1 */
+ znode->child_cnt -= 1;
+ ubifs_assert(znode->level != 0);
+ for (i = n; i < znode->child_cnt; i++) {
+ znode->zbranch[i] = znode->zbranch[i + 1];
+ if (znode->zbranch[i].znode)
+ znode->zbranch[i].znode->iip = i;
+ }
+
+ /*
+ * If this is the root and it has only 1 child then
+ * collapse the tree.
+ */
+ if (!znode->parent) {
+ while (znode->child_cnt == 1 && znode->level != 0) {
+ zp = znode;
+ zbr = &znode->zbranch[0];
+ znode = get_znode(c, znode, 0);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ znode = dirty_cow_znode(c, zbr);
+ if (IS_ERR(znode))
+ return PTR_ERR(znode);
+ znode->parent = NULL;
+ znode->iip = 0;
+ if (c->zroot.len) {
+ err = insert_old_idx(c, c->zroot.lnum,
+ c->zroot.offs);
+ if (err)
+ return err;
+ }
+ c->zroot.lnum = zbr->lnum;
+ c->zroot.offs = zbr->offs;
+ c->zroot.len = zbr->len;
+ c->zroot.znode = znode;
+ ubifs_assert(!test_bit(OBSOLETE_ZNODE,
+ &zp->flags));
+ ubifs_assert(test_bit(DIRTY_ZNODE, &zp->flags));
+ atomic_long_dec(&c->dirty_zn_cnt);
+
+ if (zp->cnext) {
+ __set_bit(OBSOLETE_ZNODE, &zp->flags);
+ atomic_long_inc(&c->clean_zn_cnt);
+ atomic_long_inc(&ubifs_clean_zn_cnt);
+ } else
+ kfree(zp);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ubifs_tnc_remove - remove an index entry of a node.
+ * @c: UBIFS file-system description object
+ * @key: key of node
+ *
+ * Returns %0 on success or negative error code on failure.
+ */
+int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key)
+{
+ int found, n, err = 0;
+ struct ubifs_znode *znode;
+
+ mutex_lock(&c->tnc_mutex);
+ dbg_tnc("key %s", DBGKEY(key));
+ found = lookup_level0_dirty(c, key, &znode, &n);
+ if (found < 0) {
+ err = found;
+ goto out_unlock;
+ }
+ if (found == 1)
+ err = tnc_delete(c, znode, n);
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+
+out_unlock:
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
+ * ubifs_tnc_remove_nm - remove an index entry for a "hashed" node.
+ * @c: UBIFS file-system description object
+ * @key: key of node
+ * @nm: directory entry name
+ *
+ * Returns %0 on success or negative error code on failure.
+ */
+int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
+ const struct qstr *nm)
+{
+ int n, err;
+ struct ubifs_znode *znode;
+
+ mutex_lock(&c->tnc_mutex);
+ dbg_tnc("%.*s, key %s", nm->len, nm->name, DBGKEY(key));
+ err = lookup_level0_dirty(c, key, &znode, &n);
+ if (err < 0)
+ goto out_unlock;
+
+ if (err) {
+ if (c->replaying)
+ err = fallible_resolve_collision(c, key, &znode, &n,
+ nm, 0);
+ else
+ err = resolve_collision(c, key, &znode, &n, nm);
+ dbg_tnc("rc returned %d, znode %p, n %d", err, znode, n);
+ if (err < 0)
+ goto out_unlock;
+ if (err) {
+ /* Ensure the znode is dirtied */
+ if (znode->cnext || !ubifs_zn_dirty(znode)) {
+ znode = dirty_cow_bottom_up(c, znode);
+ if (IS_ERR(znode)) {
+ err = PTR_ERR(znode);
+ goto out_unlock;
+ }
+ }
+ err = tnc_delete(c, znode, n);
+ }
+ }
+
+out_unlock:
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
+ * key_in_range - determine if a key falls within a range of keys.
+ * @c: UBIFS file-system description object
+ * @key: key to check
+ * @from_key: lowest key in range
+ * @to_key: highest key in range
+ *
+ * This function returns %1 if the key is in range and %0 otherwise.
+ */
+static int key_in_range(struct ubifs_info *c, union ubifs_key *key,
+ union ubifs_key *from_key, union ubifs_key *to_key)
+{
+ if (keys_cmp(c, key, from_key) < 0)
+ return 0;
+ if (keys_cmp(c, key, to_key) > 0)
+ return 0;
+ return 1;
+}
+
+/**
+ * ubifs_tnc_remove_range - remove index entries in range.
+ * @c: UBIFS file-system description object
+ * @from_key: lowest key to remove
+ * @to_key: highest key to remove
+ *
+ * This function removes index entries starting at @from_key and ending at
+ * @to_key. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
+ union ubifs_key *to_key)
+{
+ int i, n, k, err = 0;
+ struct ubifs_znode *znode;
+ union ubifs_key *key;
+
+ mutex_lock(&c->tnc_mutex);
+ while (1) {
+ /* Find first level 0 znode that contains keys to remove */
+ err = ubifs_lookup_level0(c, from_key, &znode, &n);
+ if (err < 0)
+ goto out_unlock;
+
+ if (err)
+ key = from_key;
+ else {
+ err = tnc_next(c, &znode, &n);
+ if (err == -ENOENT) {
+ err = 0;
+ goto out_unlock;
+ }
+ if (err < 0)
+ goto out_unlock;
+ key = &znode->zbranch[n].key;
+ if (!key_in_range(c, key, from_key, to_key)) {
+ err = 0;
+ goto out_unlock;
+ }
+ }
+
+ /* Ensure the znode is dirtied */
+ if (znode->cnext || !ubifs_zn_dirty(znode)) {
+ znode = dirty_cow_bottom_up(c, znode);
+ if (IS_ERR(znode)) {
+ err = PTR_ERR(znode);
+ goto out_unlock;
+ }
+ }
+
+ /* Remove all keys in range except the first */
+ for (i = n + 1, k = 0; i < znode->child_cnt; i++, k++) {
+ key = &znode->zbranch[i].key;
+ if (!key_in_range(c, key, from_key, to_key))
+ break;
+ lnc_free(&znode->zbranch[i]);
+ err = ubifs_add_dirt(c, znode->zbranch[i].lnum,
+ znode->zbranch[i].len);
+ if (err) {
+ dbg_dump_znode(c, znode);
+ goto out_unlock;
+ }
+ dbg_tnc("removing %s", DBGKEY(key));
+ }
+ if (k) {
+ for (i = n + 1 + k; i < znode->child_cnt; i++)
+ znode->zbranch[i - k] = znode->zbranch[i];
+ znode->child_cnt -= k;
+ }
+
+ /* Now delete the first */
+ err = tnc_delete(c, znode, n);
+ if (err)
+ goto out_unlock;
+ }
+
+out_unlock:
+ if (!err)
+ err = dbg_check_tnc(c, 0);
+ mutex_unlock(&c->tnc_mutex);
+ return err;
+}
+
+/**
+ * ubifs_tnc_remove_ino - remove an inode from TNC.
+ * @c: UBIFS file-system description object
+ * @inum: inode number to remove
+ *
+ * This function removes inode @inum and all the extended attributes associated
+ * with the inode from TNC and returns zero in case of success or a negative
+ * error code in case of failure.
+ */
+int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum)
+{
+ union ubifs_key key1, key2;
+ struct ubifs_dent_node *xent, *pxent = NULL;
+ struct qstr nm = { .name = NULL };
+
+ dbg_tnc("ino %lu", (unsigned long)inum);
+
+ /*
+ * Walk all extended attribute entries and remove them together with
+ * corresponding extended attribute inodes.
+ */
+ lowest_xent_key(c, &key1, inum);
+ while (1) {
+ ino_t xattr_inum;
+ int err;
+
+ xent = ubifs_tnc_next_ent(c, &key1, &nm);
+ if (IS_ERR(xent)) {
+ err = PTR_ERR(xent);
+ if (err == -ENOENT)
+ break;
+ return err;
+ }
+
+ xattr_inum = le64_to_cpu(xent->inum);
+ dbg_tnc("xent '%s', ino %lu", xent->name,
+ (unsigned long)xattr_inum);
+
+ nm.name = (char *)xent->name;
+ nm.len = le16_to_cpu(xent->nlen);
+ err = ubifs_tnc_remove_nm(c, &key1, &nm);
+ if (err) {
+ kfree(xent);
+ return err;
+ }
+
+ lowest_ino_key(c, &key1, xattr_inum);
+ highest_ino_key(c, &key2, xattr_inum);
+ err = ubifs_tnc_remove_range(c, &key1, &key2);
+ if (err) {
+ kfree(xent);
+ return err;
+ }
+
+ kfree(pxent);
+ pxent = xent;
+ key_read(c, &xent->key, &key1);
+ }
+
+ kfree(pxent);
+ lowest_ino_key(c, &key1, inum);
+ highest_ino_key(c, &key2, inum);
+
+ return ubifs_tnc_remove_range(c, &key1, &key2);
+}
+
+/**
+ * ubifs_tnc_next_ent - walk directory or extended attribute entries.
+ * @c: UBIFS file-system description object
+ * @key: key of last entry
+ * @nm: name of last entry found or %NULL
+ *
+ * This function finds and reads the next directory or extended attribute entry
+ * after the given key (@key) if there is one. @nm is used to resolve
+ * collisions.
+ *
+ * If the name of the current entry is not known and only the key is known,
+ * @nm->name has to be %NULL. In this case the semantics of this function is a
+ * little bit different and it returns the entry corresponding to this key, not
+ * the next one. If the key was not found, the closest "right" entry is
+ * returned.
+ *
+ * If the first entry has to be found, @key has to contain the lowest possible
+ * key value for this inode and @nm->name has to be %NULL.
+ *
+ * This function returns the found directory or extended attribute entry node
+ * in case of success, %-ENOENT is returned if no entry was found, and a
+ * negative error code is returned in case of failure.
+ */
+struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
+ union ubifs_key *key,
+ const struct qstr *nm)
+{
+ int n, err, type = key_type(c, key);
+ struct ubifs_znode *znode;
+ struct ubifs_dent_node *dent;
+ struct ubifs_zbranch *zbr;
+ union ubifs_key *dkey;
+
+ dbg_tnc("%s %s", nm->name ? (char *)nm->name : "(lowest)", DBGKEY(key));
+ ubifs_assert(is_hash_key(c, key));
+
+ mutex_lock(&c->tnc_mutex);
+ err = ubifs_lookup_level0(c, key, &znode, &n);
+ if (unlikely(err < 0))
+ goto out_unlock;
+
+ if (nm->name) {
+ if (err) {
+ /* Handle collisions */
+ err = resolve_collision(c, key, &znode, &n, nm);
+ dbg_tnc("rc returned %d, znode %p, n %d",
+ err, znode, n);
+ if (unlikely(err < 0))
+ goto out_unlock;
+ }
+
+ /* Now find next entry */
+ err = tnc_next(c, &znode, &n);
+ if (unlikely(err))
+ goto out_unlock;
+ } else {
+ /*
+ * The full name of the entry was not given, in which case the
+ * behavior of this function is a little different and it
+		 * returns the current entry, not the next one.
+ */
+ if (!err) {
+ /*
+ * However, the given key does not exist in the TNC
+ * tree and @znode/@n variables contain the closest
+ * "preceding" element. Switch to the next one.
+ */
+ err = tnc_next(c, &znode, &n);
+ if (err)
+ goto out_unlock;
+ }
+ }
+
+ zbr = &znode->zbranch[n];
+ dent = kmalloc(zbr->len, GFP_NOFS);
+ if (unlikely(!dent)) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /*
+ * The above 'tnc_next()' call could lead us to the next inode, check
+ * this.
+ */
+ dkey = &zbr->key;
+ if (key_inum(c, dkey) != key_inum(c, key) ||
+ key_type(c, dkey) != type) {
+ err = -ENOENT;
+ goto out_free;
+ }
+
+ err = tnc_read_node_nm(c, zbr, dent);
+ if (unlikely(err))
+ goto out_free;
+
+ mutex_unlock(&c->tnc_mutex);
+ return dent;
+
+out_free:
+ kfree(dent);
+out_unlock:
+ mutex_unlock(&c->tnc_mutex);
+ return ERR_PTR(err);
+}
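+
+/*
+ * Illustrative iteration sketch (not part of this file): walking all entries
+ * of directory 'dir_inum' with 'ubifs_tnc_next_ent()'. It mirrors the
+ * extended attribute walk in 'ubifs_tnc_remove_ino()' above; the helper
+ * 'lowest_dent_key()' is assumed to come from key.h.
+ *
+ *	union ubifs_key key;
+ *	struct qstr nm = { .name = NULL };
+ *	struct ubifs_dent_node *dent, *pdent = NULL;
+ *
+ *	lowest_dent_key(c, &key, dir_inum);
+ *	while (1) {
+ *		dent = ubifs_tnc_next_ent(c, &key, &nm);
+ *		if (IS_ERR(dent))
+ *			break;      (PTR_ERR(dent) == -ENOENT means done)
+ *		... use dent->name, dent->inum, dent->type ...
+ *		kfree(pdent);
+ *		pdent = dent;
+ *		nm.name = (char *)dent->name;
+ *		nm.len = le16_to_cpu(dent->nlen);
+ *		key_read(c, &dent->key, &key);
+ *	}
+ *	kfree(pdent);
+ */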
diff --git a/qemu/roms/u-boot/fs/ubifs/tnc_misc.c b/qemu/roms/u-boot/fs/ubifs/tnc_misc.c
new file mode 100644
index 000000000..955219fa0
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/tnc_misc.c
@@ -0,0 +1,435 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Adrian Hunter
+ * Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file contains miscellaneous TNC-related functions shared between
+ * different files. This file does not form any logically separate TNC
+ * sub-system. The file was created because there is a lot of TNC code and
+ * putting it all in one file would make that file too big and unreadable.
+ */
+
+#include "ubifs.h"
+
+/**
+ * ubifs_tnc_levelorder_next - next TNC tree element in levelorder traversal.
+ * @zr: root of the subtree to traverse
+ * @znode: previous znode
+ *
+ * This function implements levelorder TNC traversal. The LNC is ignored.
+ * Returns the next element or %NULL if @znode is already the last one.
+ */
+struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
+ struct ubifs_znode *znode)
+{
+ int level, iip, level_search = 0;
+ struct ubifs_znode *zn;
+
+ ubifs_assert(zr);
+
+ if (unlikely(!znode))
+ return zr;
+
+ if (unlikely(znode == zr)) {
+ if (znode->level == 0)
+ return NULL;
+ return ubifs_tnc_find_child(zr, 0);
+ }
+
+ level = znode->level;
+
+ iip = znode->iip;
+ while (1) {
+ ubifs_assert(znode->level <= zr->level);
+
+ /*
+ * First walk up until there is a znode with next branch to
+ * look at.
+ */
+ while (znode->parent != zr && iip >= znode->parent->child_cnt) {
+ znode = znode->parent;
+ iip = znode->iip;
+ }
+
+ if (unlikely(znode->parent == zr &&
+ iip >= znode->parent->child_cnt)) {
+ /* This level is done, switch to the lower one */
+ level -= 1;
+ if (level_search || level < 0)
+ /*
+ * We were already looking for znode at lower
+ * level ('level_search'). As we are here
+ * again, it just does not exist. Or all levels
+ * were finished ('level < 0').
+ */
+ return NULL;
+
+ level_search = 1;
+ iip = -1;
+ znode = ubifs_tnc_find_child(zr, 0);
+ ubifs_assert(znode);
+ }
+
+ /* Switch to the next index */
+ zn = ubifs_tnc_find_child(znode->parent, iip + 1);
+ if (!zn) {
+			/* No more children to look at, we have to walk up */
+ iip = znode->parent->child_cnt;
+ continue;
+ }
+
+ /* Walk back down to the level we came from ('level') */
+ while (zn->level != level) {
+ znode = zn;
+ zn = ubifs_tnc_find_child(zn, 0);
+ if (!zn) {
+ /*
+ * This path is not too deep so it does not
+ * reach 'level'. Try next path.
+ */
+ iip = znode->iip;
+ break;
+ }
+ }
+
+ if (zn) {
+ ubifs_assert(zn->level >= 0);
+ return zn;
+ }
+ }
+}
+
+/**
+ * ubifs_search_zbranch - search znode branch.
+ * @c: UBIFS file-system description object
+ * @znode: znode to search in
+ * @key: key to search for
+ * @n: znode branch slot number is returned here
+ *
+ * This is a helper function which searches for the branch with key @key in
+ * @znode using binary search. The result of the search may be:
+ * o exact match, then %1 is returned, and the slot number of the branch is
+ * stored in @n;
+ * o no exact match, then %0 is returned and the slot number of the left
+ * closest branch is returned in @n; if all keys in this znode are greater
+ * than @key, then %-1 is returned in @n.
+ */
+int ubifs_search_zbranch(const struct ubifs_info *c,
+ const struct ubifs_znode *znode,
+ const union ubifs_key *key, int *n)
+{
+ int beg = 0, end = znode->child_cnt, uninitialized_var(mid);
+ int uninitialized_var(cmp);
+ const struct ubifs_zbranch *zbr = &znode->zbranch[0];
+
+ ubifs_assert(end > beg);
+
+ while (end > beg) {
+ mid = (beg + end) >> 1;
+ cmp = keys_cmp(c, key, &zbr[mid].key);
+ if (cmp > 0)
+ beg = mid + 1;
+ else if (cmp < 0)
+ end = mid;
+ else {
+ *n = mid;
+ return 1;
+ }
+ }
+
+ *n = end - 1;
+
+ /* The insert point is after *n */
+ ubifs_assert(*n >= -1 && *n < znode->child_cnt);
+ if (*n == -1)
+ ubifs_assert(keys_cmp(c, key, &zbr[0].key) < 0);
+ else
+ ubifs_assert(keys_cmp(c, key, &zbr[*n].key) > 0);
+ if (*n + 1 < znode->child_cnt)
+ ubifs_assert(keys_cmp(c, key, &zbr[*n + 1].key) < 0);
+
+ return 0;
+}
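+
+/*
+ * A worked example of the return convention above: if a level-0 znode holds
+ * data keys for blocks 2, 5 and 9 of the same inode, then searching for
+ * block 5 returns %1 with *n == 1 (exact match), searching for block 7
+ * returns %0 with *n == 1 (slot of the closest smaller key), and searching
+ * for block 1 returns %0 with *n == -1 (all keys are greater than the key).
+ */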
+
+/**
+ * ubifs_tnc_postorder_first - find first znode to do postorder tree traversal.
+ * @znode: znode to start at (root of the sub-tree to traverse)
+ *
+ * Find the lowest leftmost znode in a subtree of the TNC tree. The LNC is
+ * ignored.
+ */
+struct ubifs_znode *ubifs_tnc_postorder_first(struct ubifs_znode *znode)
+{
+ if (unlikely(!znode))
+ return NULL;
+
+ while (znode->level > 0) {
+ struct ubifs_znode *child;
+
+ child = ubifs_tnc_find_child(znode, 0);
+ if (!child)
+ return znode;
+ znode = child;
+ }
+
+ return znode;
+}
+
+/**
+ * ubifs_tnc_postorder_next - next TNC tree element in postorder traversal.
+ * @znode: previous znode
+ *
+ * This function implements postorder TNC traversal. The LNC is ignored.
+ * Returns the next element or %NULL if @znode is already the last one.
+ */
+struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode)
+{
+ struct ubifs_znode *zn;
+
+ ubifs_assert(znode);
+ if (unlikely(!znode->parent))
+ return NULL;
+
+ /* Switch to the next index in the parent */
+ zn = ubifs_tnc_find_child(znode->parent, znode->iip + 1);
+ if (!zn)
+ /* This is in fact the last child, return parent */
+ return znode->parent;
+
+ /* Go to the first znode in this new subtree */
+ return ubifs_tnc_postorder_first(zn);
+}
+
+/**
+ * read_znode - read an indexing node from flash and fill znode.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB of the indexing node to read
+ * @offs: node offset
+ * @len: node length
+ * @znode: znode to read to
+ *
+ * This function reads an indexing node from the flash media and fills znode
+ * with the read data. Returns zero in case of success and a negative error
+ * code in case of failure. The read indexing node is validated and if anything
+ * is wrong with it, this function prints complaint messages and returns
+ * %-EINVAL.
+ */
+static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
+ struct ubifs_znode *znode)
+{
+ int i, err, type, cmp;
+ struct ubifs_idx_node *idx;
+
+ idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
+ if (!idx)
+ return -ENOMEM;
+
+ err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
+ if (err < 0) {
+ kfree(idx);
+ return err;
+ }
+
+ znode->child_cnt = le16_to_cpu(idx->child_cnt);
+ znode->level = le16_to_cpu(idx->level);
+
+ dbg_tnc("LEB %d:%d, level %d, %d branch",
+ lnum, offs, znode->level, znode->child_cnt);
+
+ if (znode->child_cnt > c->fanout || znode->level > UBIFS_MAX_LEVELS) {
+ dbg_err("current fanout %d, branch count %d",
+ c->fanout, znode->child_cnt);
+ dbg_err("max levels %d, znode level %d",
+ UBIFS_MAX_LEVELS, znode->level);
+ err = 1;
+ goto out_dump;
+ }
+
+ for (i = 0; i < znode->child_cnt; i++) {
+ const struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
+ struct ubifs_zbranch *zbr = &znode->zbranch[i];
+
+ key_read(c, &br->key, &zbr->key);
+ zbr->lnum = le32_to_cpu(br->lnum);
+ zbr->offs = le32_to_cpu(br->offs);
+ zbr->len = le32_to_cpu(br->len);
+ zbr->znode = NULL;
+
+ /* Validate branch */
+
+ if (zbr->lnum < c->main_first ||
+ zbr->lnum >= c->leb_cnt || zbr->offs < 0 ||
+ zbr->offs + zbr->len > c->leb_size || zbr->offs & 7) {
+ dbg_err("bad branch %d", i);
+ err = 2;
+ goto out_dump;
+ }
+
+ switch (key_type(c, &zbr->key)) {
+ case UBIFS_INO_KEY:
+ case UBIFS_DATA_KEY:
+ case UBIFS_DENT_KEY:
+ case UBIFS_XENT_KEY:
+ break;
+ default:
+ dbg_msg("bad key type at slot %d: %s", i,
+ DBGKEY(&zbr->key));
+ err = 3;
+ goto out_dump;
+ }
+
+ if (znode->level)
+ continue;
+
+ type = key_type(c, &zbr->key);
+ if (c->ranges[type].max_len == 0) {
+ if (zbr->len != c->ranges[type].len) {
+ dbg_err("bad target node (type %d) length (%d)",
+ type, zbr->len);
+ dbg_err("have to be %d", c->ranges[type].len);
+ err = 4;
+ goto out_dump;
+ }
+ } else if (zbr->len < c->ranges[type].min_len ||
+ zbr->len > c->ranges[type].max_len) {
+ dbg_err("bad target node (type %d) length (%d)",
+ type, zbr->len);
+ dbg_err("have to be in range of %d-%d",
+ c->ranges[type].min_len,
+ c->ranges[type].max_len);
+ err = 5;
+ goto out_dump;
+ }
+ }
+
+ /*
+	 * Ensure that the next key is greater than or equivalent to the
+ * previous one.
+ */
+ for (i = 0; i < znode->child_cnt - 1; i++) {
+ const union ubifs_key *key1, *key2;
+
+ key1 = &znode->zbranch[i].key;
+ key2 = &znode->zbranch[i + 1].key;
+
+ cmp = keys_cmp(c, key1, key2);
+ if (cmp > 0) {
+ dbg_err("bad key order (keys %d and %d)", i, i + 1);
+ err = 6;
+ goto out_dump;
+ } else if (cmp == 0 && !is_hash_key(c, key1)) {
+ /* These can only be keys with colliding hash */
+ dbg_err("keys %d and %d are not hashed but equivalent",
+ i, i + 1);
+ err = 7;
+ goto out_dump;
+ }
+ }
+
+ kfree(idx);
+ return 0;
+
+out_dump:
+ ubifs_err("bad indexing node at LEB %d:%d, error %d", lnum, offs, err);
+ dbg_dump_node(c, idx);
+ kfree(idx);
+ return -EINVAL;
+}
+
+/**
+ * ubifs_load_znode - load znode to TNC cache.
+ * @c: UBIFS file-system description object
+ * @zbr: znode branch
+ * @parent: znode's parent
+ * @iip: index in parent
+ *
+ * This function loads the znode pointed to by @zbr into the TNC cache and
+ * returns a pointer to it in case of success or a negative error code in case
+ * of failure.
+ */
+struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
+ struct ubifs_zbranch *zbr,
+ struct ubifs_znode *parent, int iip)
+{
+ int err;
+ struct ubifs_znode *znode;
+
+ ubifs_assert(!zbr->znode);
+ /*
+ * A slab cache is not presently used for znodes because the znode size
+ * depends on the fanout which is stored in the superblock.
+ */
+ znode = kzalloc(c->max_znode_sz, GFP_NOFS);
+ if (!znode)
+ return ERR_PTR(-ENOMEM);
+
+ err = read_znode(c, zbr->lnum, zbr->offs, zbr->len, znode);
+ if (err)
+ goto out;
+
+ zbr->znode = znode;
+ znode->parent = parent;
+ znode->time = get_seconds();
+ znode->iip = iip;
+
+ return znode;
+
+out:
+ kfree(znode);
+ return ERR_PTR(err);
+}
+
+/**
+ * ubifs_tnc_read_node - read a leaf node from the flash media.
+ * @c: UBIFS file-system description object
+ * @zbr: key and position of the node
+ * @node: node is returned here
+ *
+ * This function reads a node defined by @zbr from the flash media. Returns
+ * zero in case of success or a negative error code in case of
+ * failure.
+ */
+int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ void *node)
+{
+ union ubifs_key key1, *key = &zbr->key;
+ int err, type = key_type(c, key);
+
+ err = ubifs_read_node(c, node, type, zbr->len, zbr->lnum, zbr->offs);
+
+ if (err) {
+ dbg_tnc("key %s", DBGKEY(key));
+ return err;
+ }
+
+ /* Make sure the key of the read node is correct */
+ key_read(c, node + UBIFS_KEY_OFFSET, &key1);
+ if (!keys_eq(c, key, &key1)) {
+ ubifs_err("bad key in node at LEB %d:%d",
+ zbr->lnum, zbr->offs);
+ dbg_tnc("looked for key %s found node's key %s",
+ DBGKEY(key), DBGKEY1(&key1));
+ dbg_dump_node(c, node);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/qemu/roms/u-boot/fs/ubifs/ubifs-media.h b/qemu/roms/u-boot/fs/ubifs/ubifs-media.h
new file mode 100644
index 000000000..3eee07e0c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/ubifs-media.h
@@ -0,0 +1,775 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+/*
+ * This file describes the UBIFS on-flash format and contains definitions of all the
+ * relevant data structures and constants.
+ *
+ * All UBIFS on-flash objects are stored in the form of nodes. All nodes start
+ * with the UBIFS node magic number and have the same common header. Nodes
+ * always sit at 8-byte aligned positions on the media and node header sizes are
+ * also 8-byte aligned (except for the indexing node and the padding node).
+ */
+
+#ifndef __UBIFS_MEDIA_H__
+#define __UBIFS_MEDIA_H__
+
+/* UBIFS node magic number (must not have the padding byte first or last) */
+#define UBIFS_NODE_MAGIC 0x06101831
+
+/*
+ * UBIFS on-flash format version. This version is increased when the on-flash
+ * format changes. If this happens, UBIFS will still support older versions as
+ * well, but older UBIFS code will not support newer formats. Format changes
+ * will be rare and only when absolutely necessary, e.g. to fix a bug or to add
+ * a new feature.
+ *
+ * UBIFS went into mainline kernel with format version 4. The older formats
+ * were development formats.
+ */
+#define UBIFS_FORMAT_VERSION 4
+
+/*
+ * Read-only compatibility version. If the UBIFS format is changed, older UBIFS
+ * implementations will not be able to mount newer formats in read-write mode.
+ * However, depending on the change, it may be possible to mount newer formats
+ * in R/O mode. This is indicated by the R/O compatibility version which is
+ * stored in the super-block.
+ *
+ * This is needed to support boot-loaders which only need R/O mounting. With
+ * this flag it is possible to do UBIFS format changes without a need to update
+ * boot-loaders.
+ */
+#define UBIFS_RO_COMPAT_VERSION 0
+
+/* Minimum logical eraseblock size in bytes */
+#define UBIFS_MIN_LEB_SZ (15*1024)
+
+/* Initial CRC32 value used when calculating CRC checksums */
+#define UBIFS_CRC32_INIT 0xFFFFFFFFU
+
+/*
+ * UBIFS does not try to compress data if its length is less than the below
+ * constant.
+ */
+#define UBIFS_MIN_COMPR_LEN 128
+
+/*
+ * If the compressed data length is less than %UBIFS_MIN_COMPRESS_DIFF bytes
+ * shorter than the uncompressed data length, UBIFS prefers to leave this data
+ * node uncompressed, because it will be read faster.
+ */
+#define UBIFS_MIN_COMPRESS_DIFF 64
+
+/* Root inode number */
+#define UBIFS_ROOT_INO 1
+
+/* Lowest inode number used for regular inodes (not UBIFS-only internal ones) */
+#define UBIFS_FIRST_INO 64
+
+/*
+ * Maximum file name and extended attribute length (must be a multiple of 8,
+ * minus 1).
+ */
+#define UBIFS_MAX_NLEN 255
+
+/* Maximum number of data journal heads */
+#define UBIFS_MAX_JHEADS 1
+
+/*
+ * Size of the UBIFS data block. Note, UBIFS is not a block-oriented file-system,
+ * which means that it does not treat the underlying media as consisting of
+ * blocks as in the case of hard drives. Do not be confused. A UBIFS block is just
+ * the maximum amount of data which one data node can have or which can be
+ * attached to an inode node.
+ */
+#define UBIFS_BLOCK_SIZE 4096
+#define UBIFS_BLOCK_SHIFT 12
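+
+/*
+ * For example, byte offset 'pos' within a file maps to UBIFS block
+ * 'pos >> UBIFS_BLOCK_SHIFT'; offset 10000 therefore belongs to block 2
+ * (10000 / 4096 == 2), and that block number is what ends up in the data key.
+ */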
+
+/* UBIFS padding byte pattern (must not be first or last byte of node magic) */
+#define UBIFS_PADDING_BYTE 0xCE
+
+/* Maximum possible key length */
+#define UBIFS_MAX_KEY_LEN 16
+
+/* Key length ("simple" format) */
+#define UBIFS_SK_LEN 8
+
+/* Minimum index tree fanout */
+#define UBIFS_MIN_FANOUT 3
+
+/* Maximum number of levels in UBIFS indexing B-tree */
+#define UBIFS_MAX_LEVELS 512
+
+/* Maximum amount of data attached to an inode in bytes */
+#define UBIFS_MAX_INO_DATA UBIFS_BLOCK_SIZE
+
+/* LEB Properties Tree fanout (must be power of 2) and fanout shift */
+#define UBIFS_LPT_FANOUT 4
+#define UBIFS_LPT_FANOUT_SHIFT 2
+
+/* LEB Properties Tree bit field sizes */
+#define UBIFS_LPT_CRC_BITS 16
+#define UBIFS_LPT_CRC_BYTES 2
+#define UBIFS_LPT_TYPE_BITS 4
+
+/* The key is always at the same position in all keyed nodes */
+#define UBIFS_KEY_OFFSET offsetof(struct ubifs_ino_node, key)
+
+/*
+ * LEB Properties Tree node types.
+ *
+ * UBIFS_LPT_PNODE: LPT leaf node (contains LEB properties)
+ * UBIFS_LPT_NNODE: LPT internal node
+ * UBIFS_LPT_LTAB: LPT's own lprops table
+ * UBIFS_LPT_LSAVE: LPT's save table (big model only)
+ * UBIFS_LPT_NODE_CNT: count of LPT node types
+ * UBIFS_LPT_NOT_A_NODE: all ones (15 for 4 bits) is never a valid node type
+ */
+enum {
+ UBIFS_LPT_PNODE,
+ UBIFS_LPT_NNODE,
+ UBIFS_LPT_LTAB,
+ UBIFS_LPT_LSAVE,
+ UBIFS_LPT_NODE_CNT,
+ UBIFS_LPT_NOT_A_NODE = (1 << UBIFS_LPT_TYPE_BITS) - 1,
+};
+
+/*
+ * UBIFS inode types.
+ *
+ * UBIFS_ITYPE_REG: regular file
+ * UBIFS_ITYPE_DIR: directory
+ * UBIFS_ITYPE_LNK: soft link
+ * UBIFS_ITYPE_BLK: block device node
+ * UBIFS_ITYPE_CHR: character device node
+ * UBIFS_ITYPE_FIFO: fifo
+ * UBIFS_ITYPE_SOCK: socket
+ * UBIFS_ITYPES_CNT: count of supported file types
+ */
+enum {
+ UBIFS_ITYPE_REG,
+ UBIFS_ITYPE_DIR,
+ UBIFS_ITYPE_LNK,
+ UBIFS_ITYPE_BLK,
+ UBIFS_ITYPE_CHR,
+ UBIFS_ITYPE_FIFO,
+ UBIFS_ITYPE_SOCK,
+ UBIFS_ITYPES_CNT,
+};
+
+/*
+ * Supported key hash functions.
+ *
+ * UBIFS_KEY_HASH_R5: R5 hash
+ * UBIFS_KEY_HASH_TEST: test hash which just returns first 4 bytes of the name
+ */
+enum {
+ UBIFS_KEY_HASH_R5,
+ UBIFS_KEY_HASH_TEST,
+};
+
+/*
+ * Supported key formats.
+ *
+ * UBIFS_SIMPLE_KEY_FMT: simple key format
+ */
+enum {
+ UBIFS_SIMPLE_KEY_FMT,
+};
+
+/*
+ * The simple key format uses 29 bits for storing UBIFS block number and hash
+ * value.
+ */
+#define UBIFS_S_KEY_BLOCK_BITS 29
+#define UBIFS_S_KEY_BLOCK_MASK 0x1FFFFFFF
+#define UBIFS_S_KEY_HASH_BITS UBIFS_S_KEY_BLOCK_BITS
+#define UBIFS_S_KEY_HASH_MASK UBIFS_S_KEY_BLOCK_MASK
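+
+/*
+ * A minimal sketch of how a simple-format data key is packed (this is
+ * roughly what the key helpers in key.h do; shown here only for
+ * illustration):
+ *
+ *	key.u32[0] = inum;
+ *	key.u32[1] = (block & UBIFS_S_KEY_BLOCK_MASK) |
+ *		     (UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS);
+ *
+ * i.e. the low 29 bits of the second word hold the block number (or the name
+ * hash for directory entry keys) and the top 3 bits hold the key type.
+ */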
+
+/*
+ * Key types.
+ *
+ * UBIFS_INO_KEY: inode node key
+ * UBIFS_DATA_KEY: data node key
+ * UBIFS_DENT_KEY: directory entry node key
+ * UBIFS_XENT_KEY: extended attribute entry key
+ * UBIFS_KEY_TYPES_CNT: number of supported key types
+ */
+enum {
+ UBIFS_INO_KEY,
+ UBIFS_DATA_KEY,
+ UBIFS_DENT_KEY,
+ UBIFS_XENT_KEY,
+ UBIFS_KEY_TYPES_CNT,
+};
+
+/* Count of LEBs reserved for the superblock area */
+#define UBIFS_SB_LEBS 1
+/* Count of LEBs reserved for the master area */
+#define UBIFS_MST_LEBS 2
+
+/* First LEB of the superblock area */
+#define UBIFS_SB_LNUM 0
+/* First LEB of the master area */
+#define UBIFS_MST_LNUM (UBIFS_SB_LNUM + UBIFS_SB_LEBS)
+/* First LEB of the log area */
+#define UBIFS_LOG_LNUM (UBIFS_MST_LNUM + UBIFS_MST_LEBS)
+
+/*
+ * The below constants define the absolute minimum values for various UBIFS
+ * media areas. Many of them actually depend on flash geometry and the FS
+ * configuration (number of journal heads, orphan LEBs, etc). This means that
+ * the smallest volume size which can be used for UBIFS cannot be pre-defined
+ * by these constants. A file-system that meets the limits below will not
+ * necessarily mount. UBIFS does run-time calculations and validates the FS
+ * size.
+ */
+
+/* Minimum number of logical eraseblocks in the log */
+#define UBIFS_MIN_LOG_LEBS 2
+/* Minimum number of bud logical eraseblocks (one for each head) */
+#define UBIFS_MIN_BUD_LEBS 3
+/* Minimum number of journal logical eraseblocks */
+#define UBIFS_MIN_JNL_LEBS (UBIFS_MIN_LOG_LEBS + UBIFS_MIN_BUD_LEBS)
+/* Minimum number of LPT area logical eraseblocks */
+#define UBIFS_MIN_LPT_LEBS 2
+/* Minimum number of orphan area logical eraseblocks */
+#define UBIFS_MIN_ORPH_LEBS 1
+/*
+ * Minimum number of main area logical eraseblocks (buds, 3 for the index, 1
+ * for GC, 1 for deletions, and at least 1 for committed data).
+ */
+#define UBIFS_MIN_MAIN_LEBS (UBIFS_MIN_BUD_LEBS + 6)
+
+/* Minimum number of logical eraseblocks */
+#define UBIFS_MIN_LEB_CNT (UBIFS_SB_LEBS + UBIFS_MST_LEBS + \
+ UBIFS_MIN_LOG_LEBS + UBIFS_MIN_LPT_LEBS + \
+ UBIFS_MIN_ORPH_LEBS + UBIFS_MIN_MAIN_LEBS)
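+
+/*
+ * With the constants above this works out to 1 + 2 + 2 + 2 + 1 + 9 = 17
+ * logical eraseblocks, i.e. the absolute minimum LEB count implied by these
+ * definitions.
+ */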
+
+/* Node sizes (N.B. these are guaranteed to be multiples of 8) */
+#define UBIFS_CH_SZ sizeof(struct ubifs_ch)
+#define UBIFS_INO_NODE_SZ sizeof(struct ubifs_ino_node)
+#define UBIFS_DATA_NODE_SZ sizeof(struct ubifs_data_node)
+#define UBIFS_DENT_NODE_SZ sizeof(struct ubifs_dent_node)
+#define UBIFS_TRUN_NODE_SZ sizeof(struct ubifs_trun_node)
+#define UBIFS_PAD_NODE_SZ sizeof(struct ubifs_pad_node)
+#define UBIFS_SB_NODE_SZ sizeof(struct ubifs_sb_node)
+#define UBIFS_MST_NODE_SZ sizeof(struct ubifs_mst_node)
+#define UBIFS_REF_NODE_SZ sizeof(struct ubifs_ref_node)
+#define UBIFS_IDX_NODE_SZ sizeof(struct ubifs_idx_node)
+#define UBIFS_CS_NODE_SZ sizeof(struct ubifs_cs_node)
+#define UBIFS_ORPH_NODE_SZ sizeof(struct ubifs_orph_node)
+/* Extended attribute entry nodes are identical to directory entry nodes */
+#define UBIFS_XENT_NODE_SZ UBIFS_DENT_NODE_SZ
+/* Only this does not have to be multiple of 8 bytes */
+#define UBIFS_BRANCH_SZ sizeof(struct ubifs_branch)
+
+/* Maximum node sizes (N.B. these are guaranteed to be multiples of 8) */
+#define UBIFS_MAX_DATA_NODE_SZ (UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE)
+#define UBIFS_MAX_INO_NODE_SZ (UBIFS_INO_NODE_SZ + UBIFS_MAX_INO_DATA)
+#define UBIFS_MAX_DENT_NODE_SZ (UBIFS_DENT_NODE_SZ + UBIFS_MAX_NLEN + 1)
+#define UBIFS_MAX_XENT_NODE_SZ UBIFS_MAX_DENT_NODE_SZ
+
+/* The largest UBIFS node */
+#define UBIFS_MAX_NODE_SZ UBIFS_MAX_INO_NODE_SZ
+
+/*
+ * On-flash inode flags.
+ *
+ * UBIFS_COMPR_FL: use compression for this inode
+ * UBIFS_SYNC_FL: I/O on this inode has to be synchronous
+ * UBIFS_IMMUTABLE_FL: inode is immutable
+ * UBIFS_APPEND_FL: writes to the inode may only append data
+ * UBIFS_DIRSYNC_FL: I/O on this directory inode has to be synchronous
+ * UBIFS_XATTR_FL: this inode is the inode for an extended attribute value
+ *
+ * Note, these are on-flash flags which correspond to ioctl flags
+ * (@FS_COMPR_FL, etc). They have the same values now, but generally, do not
+ * have to be the same.
+ */
+enum {
+ UBIFS_COMPR_FL = 0x01,
+ UBIFS_SYNC_FL = 0x02,
+ UBIFS_IMMUTABLE_FL = 0x04,
+ UBIFS_APPEND_FL = 0x08,
+ UBIFS_DIRSYNC_FL = 0x10,
+ UBIFS_XATTR_FL = 0x20,
+};
+
+/* Inode flag bits used by UBIFS */
+#define UBIFS_FL_MASK 0x0000001F
+
+/*
+ * UBIFS compression algorithms.
+ *
+ * UBIFS_COMPR_NONE: no compression
+ * UBIFS_COMPR_LZO: LZO compression
+ * UBIFS_COMPR_ZLIB: ZLIB compression
+ * UBIFS_COMPR_TYPES_CNT: count of supported compression types
+ */
+enum {
+ UBIFS_COMPR_NONE,
+ UBIFS_COMPR_LZO,
+ UBIFS_COMPR_ZLIB,
+ UBIFS_COMPR_TYPES_CNT,
+};
+
+/*
+ * UBIFS node types.
+ *
+ * UBIFS_INO_NODE: inode node
+ * UBIFS_DATA_NODE: data node
+ * UBIFS_DENT_NODE: directory entry node
+ * UBIFS_XENT_NODE: extended attribute node
+ * UBIFS_TRUN_NODE: truncation node
+ * UBIFS_PAD_NODE: padding node
+ * UBIFS_SB_NODE: superblock node
+ * UBIFS_MST_NODE: master node
+ * UBIFS_REF_NODE: LEB reference node
+ * UBIFS_IDX_NODE: index node
+ * UBIFS_CS_NODE: commit start node
+ * UBIFS_ORPH_NODE: orphan node
+ * UBIFS_NODE_TYPES_CNT: count of supported node types
+ *
+ * Note, we index arrays by these numbers, so keep them low and contiguous.
+ * Node type constants for inodes, direntries and so on have to be the same as
+ * corresponding key type constants.
+ */
+enum {
+ UBIFS_INO_NODE,
+ UBIFS_DATA_NODE,
+ UBIFS_DENT_NODE,
+ UBIFS_XENT_NODE,
+ UBIFS_TRUN_NODE,
+ UBIFS_PAD_NODE,
+ UBIFS_SB_NODE,
+ UBIFS_MST_NODE,
+ UBIFS_REF_NODE,
+ UBIFS_IDX_NODE,
+ UBIFS_CS_NODE,
+ UBIFS_ORPH_NODE,
+ UBIFS_NODE_TYPES_CNT,
+};
+
+/*
+ * Master node flags.
+ *
+ * UBIFS_MST_DIRTY: rebooted uncleanly - master node is dirty
+ * UBIFS_MST_NO_ORPHS: no orphan inodes present
+ * UBIFS_MST_RCVRY: written by recovery
+ */
+enum {
+ UBIFS_MST_DIRTY = 1,
+ UBIFS_MST_NO_ORPHS = 2,
+ UBIFS_MST_RCVRY = 4,
+};
+
+/*
+ * Node group type (used by recovery to recover whole group or none).
+ *
+ * UBIFS_NO_NODE_GROUP: this node is not part of a group
+ * UBIFS_IN_NODE_GROUP: this node is a part of a group
+ * UBIFS_LAST_OF_NODE_GROUP: this node is the last in a group
+ */
+enum {
+ UBIFS_NO_NODE_GROUP = 0,
+ UBIFS_IN_NODE_GROUP,
+ UBIFS_LAST_OF_NODE_GROUP,
+};
+
+/*
+ * Superblock flags.
+ *
+ * UBIFS_FLG_BIGLPT: the "big" LPT model is used if set
+ */
+enum {
+ UBIFS_FLG_BIGLPT = 0x02,
+};
+
+/**
+ * struct ubifs_ch - common header node.
+ * @magic: UBIFS node magic number (%UBIFS_NODE_MAGIC)
+ * @crc: CRC-32 checksum of the node header
+ * @sqnum: sequence number
+ * @len: full node length
+ * @node_type: node type
+ * @group_type: node group type
+ * @padding: reserved for future, zeroes
+ *
+ * Every UBIFS node starts with this common part. If the node has a key, the
+ * key always goes next.
+ */
+struct ubifs_ch {
+ __le32 magic;
+ __le32 crc;
+ __le64 sqnum;
+ __le32 len;
+ __u8 node_type;
+ __u8 group_type;
+ __u8 padding[2];
+} __attribute__ ((packed));
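+
+/*
+ * Illustrative sketch only (not built): a minimal common-header check along
+ * the lines of what the node I/O code does.  It assumes %UBIFS_NODE_MAGIC and
+ * %UBIFS_CRC32_INIT as defined earlier in this header and a crc32() helper
+ * compatible with the one UBIFS uses; the function name is hypothetical.
+ */
+#if 0
+static int example_ch_is_valid(const void *buf, int buf_len)
+{
+	const struct ubifs_ch *ch = buf;
+	int len = le32_to_cpu(ch->len);
+	uint32_t crc;
+
+	if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC || len > buf_len)
+		return 0;
+	/* The CRC covers everything after the @magic and @crc fields */
+	crc = crc32(UBIFS_CRC32_INIT, (const unsigned char *)buf + 8, len - 8);
+	return crc == le32_to_cpu(ch->crc);
+}
+#endif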
+
+/**
+ * union ubifs_dev_desc - device node descriptor.
+ * @new: new type device descriptor
+ * @huge: huge type device descriptor
+ *
+ * This data structure describes the major/minor numbers of a device node. If
+ * an inode is a device node, then its data contains an object of this type.
+ * UBIFS uses the standard Linux "new" and "huge" device node encodings.
+ */
+union ubifs_dev_desc {
+ __le32 new;
+ __le64 huge;
+} __attribute__ ((packed));
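+
+/*
+ * Illustrative sketch only (not built): unpacking the "new" 32-bit encoding
+ * into major/minor numbers.  The bit layout shown follows the usual Linux
+ * new_encode_dev()/new_decode_dev() convention and should be treated as an
+ * assumption here; the function name is hypothetical.
+ */
+#if 0
+static void example_decode_new_dev(__le32 dev, unsigned int *major,
+				   unsigned int *minor)
+{
+	uint32_t d = le32_to_cpu(dev);
+
+	*major = (d & 0xfff00) >> 8;
+	*minor = (d & 0xff) | ((d >> 12) & 0xfff00);
+}
+#endif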
+
+/**
+ * struct ubifs_ino_node - inode node.
+ * @ch: common header
+ * @key: node key
+ * @creat_sqnum: sequence number at time of creation
+ * @size: inode size in bytes (amount of uncompressed data)
+ * @atime_sec: access time seconds
+ * @ctime_sec: creation time seconds
+ * @mtime_sec: modification time seconds
+ * @atime_nsec: access time nanoseconds
+ * @ctime_nsec: creation time nanoseconds
+ * @mtime_nsec: modification time nanoseconds
+ * @nlink: number of hard links
+ * @uid: owner ID
+ * @gid: group ID
+ * @mode: access flags
+ * @flags: per-inode flags (%UBIFS_COMPR_FL, %UBIFS_SYNC_FL, etc)
+ * @data_len: inode data length
+ * @xattr_cnt: count of extended attributes this inode has
+ * @xattr_size: summarized size of all extended attributes in bytes
+ * @padding1: reserved for future, zeroes
+ * @xattr_names: sum of lengths of all extended attribute names belonging to
+ * this inode
+ * @compr_type: compression type used for this inode
+ * @padding2: reserved for future, zeroes
+ * @data: data attached to the inode
+ *
+ * Note, even though the inode compression type is defined by @compr_type,
+ * some nodes of this inode may be compressed with a different compressor -
+ * this happens if the compression type is changed while the inode already
+ * has data nodes. But @compr_type will be used for further writes to the
+ * inode.
+ *
+ * Note, do not forget to amend 'zero_ino_node_unused()' function when changing
+ * the padding fields.
+ */
+struct ubifs_ino_node {
+ struct ubifs_ch ch;
+ __u8 key[UBIFS_MAX_KEY_LEN];
+ __le64 creat_sqnum;
+ __le64 size;
+ __le64 atime_sec;
+ __le64 ctime_sec;
+ __le64 mtime_sec;
+ __le32 atime_nsec;
+ __le32 ctime_nsec;
+ __le32 mtime_nsec;
+ __le32 nlink;
+ __le32 uid;
+ __le32 gid;
+ __le32 mode;
+ __le32 flags;
+ __le32 data_len;
+ __le32 xattr_cnt;
+ __le32 xattr_size;
+ __u8 padding1[4]; /* Watch 'zero_ino_node_unused()' if changing! */
+ __le32 xattr_names;
+ __le16 compr_type;
+ __u8 padding2[26]; /* Watch 'zero_ino_node_unused()' if changing! */
+ __u8 data[];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_dent_node - directory entry node.
+ * @ch: common header
+ * @key: node key
+ * @inum: target inode number
+ * @padding1: reserved for future, zeroes
+ * @type: type of the target inode (%UBIFS_ITYPE_REG, %UBIFS_ITYPE_DIR, etc)
+ * @nlen: name length
+ * @padding2: reserved for future, zeroes
+ * @name: zero-terminated name
+ *
+ * Note, do not forget to amend 'zero_dent_node_unused()' function when
+ * changing the padding fields.
+ */
+struct ubifs_dent_node {
+ struct ubifs_ch ch;
+ __u8 key[UBIFS_MAX_KEY_LEN];
+ __le64 inum;
+ __u8 padding1;
+ __u8 type;
+ __le16 nlen;
+ __u8 padding2[4]; /* Watch 'zero_dent_node_unused()' if changing! */
+ __u8 name[];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_data_node - data node.
+ * @ch: common header
+ * @key: node key
+ * @size: uncompressed data size in bytes
+ * @compr_type: compression type (%UBIFS_COMPR_NONE, %UBIFS_COMPR_LZO, etc)
+ * @padding: reserved for future, zeroes
+ * @data: data
+ *
+ * Note, do not forget to amend 'zero_data_node_unused()' function when
+ * changing the padding fields.
+ */
+struct ubifs_data_node {
+ struct ubifs_ch ch;
+ __u8 key[UBIFS_MAX_KEY_LEN];
+ __le32 size;
+ __le16 compr_type;
+ __u8 padding[2]; /* Watch 'zero_data_node_unused()' if changing! */
+ __u8 data[];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_trun_node - truncation node.
+ * @ch: common header
+ * @inum: truncated inode number
+ * @padding: reserved for future, zeroes
+ * @old_size: size before truncation
+ * @new_size: size after truncation
+ *
+ * This node exists only in the journal and never goes to the main area. Note,
+ * do not forget to amend 'zero_trun_node_unused()' function when changing the
+ * padding fields.
+ */
+struct ubifs_trun_node {
+ struct ubifs_ch ch;
+ __le32 inum;
+ __u8 padding[12]; /* Watch 'zero_trun_node_unused()' if changing! */
+ __le64 old_size;
+ __le64 new_size;
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_pad_node - padding node.
+ * @ch: common header
+ * @pad_len: how many bytes after this node are unused (because padded)
+ */
+struct ubifs_pad_node {
+ struct ubifs_ch ch;
+ __le32 pad_len;
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_sb_node - superblock node.
+ * @ch: common header
+ * @padding: reserved for future, zeroes
+ * @key_hash: type of hash function used in keys
+ * @key_fmt: format of the key
+ * @flags: file-system flags (%UBIFS_FLG_BIGLPT, etc)
+ * @min_io_size: minimal input/output unit size
+ * @leb_size: logical eraseblock size in bytes
+ * @leb_cnt: count of LEBs used by file-system
+ * @max_leb_cnt: maximum count of LEBs used by file-system
+ * @max_bud_bytes: maximum amount of data stored in buds
+ * @log_lebs: log size in logical eraseblocks
+ * @lpt_lebs: number of LEBs used for lprops table
+ * @orph_lebs: number of LEBs used for recording orphans
+ * @jhead_cnt: count of journal heads
+ * @fanout: tree fanout (max. number of links per indexing node)
+ * @lsave_cnt: number of LEB numbers in LPT's save table
+ * @fmt_version: UBIFS on-flash format version
+ * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc)
+ * @padding1: reserved for future, zeroes
+ * @rp_uid: reserve pool UID
+ * @rp_gid: reserve pool GID
+ * @rp_size: size of the reserved pool in bytes
+ * @time_gran: time granularity in nanoseconds
+ * @uuid: UUID generated when the file system image was created
+ * @ro_compat_version: UBIFS R/O compatibility version
+ * @padding2: reserved for future, zeroes
+ */
+struct ubifs_sb_node {
+ struct ubifs_ch ch;
+ __u8 padding[2];
+ __u8 key_hash;
+ __u8 key_fmt;
+ __le32 flags;
+ __le32 min_io_size;
+ __le32 leb_size;
+ __le32 leb_cnt;
+ __le32 max_leb_cnt;
+ __le64 max_bud_bytes;
+ __le32 log_lebs;
+ __le32 lpt_lebs;
+ __le32 orph_lebs;
+ __le32 jhead_cnt;
+ __le32 fanout;
+ __le32 lsave_cnt;
+ __le32 fmt_version;
+ __le16 default_compr;
+ __u8 padding1[2];
+ __le32 rp_uid;
+ __le32 rp_gid;
+ __le64 rp_size;
+ __le32 time_gran;
+ __u8 uuid[16];
+ __le32 ro_compat_version;
+ __u8 padding2[3968];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_mst_node - master node.
+ * @ch: common header
+ * @highest_inum: highest inode number in the committed index
+ * @cmt_no: commit number
+ * @flags: various flags (%UBIFS_MST_DIRTY, etc)
+ * @log_lnum: start of the log
+ * @root_lnum: LEB number of the root indexing node
+ * @root_offs: offset within @root_lnum
+ * @root_len: root indexing node length
+ * @gc_lnum: LEB reserved for garbage collection (%-1 value means the LEB was
+ * not reserved and should be reserved on mount)
+ * @ihead_lnum: LEB number of index head
+ * @ihead_offs: offset of index head
+ * @index_size: size of index on flash
+ * @total_free: total free space in bytes
+ * @total_dirty: total dirty space in bytes
+ * @total_used: total used space in bytes (includes only data LEBs)
+ * @total_dead: total dead space in bytes (includes only data LEBs)
+ * @total_dark: total dark space in bytes (includes only data LEBs)
+ * @lpt_lnum: LEB number of LPT root nnode
+ * @lpt_offs: offset of LPT root nnode
+ * @nhead_lnum: LEB number of LPT head
+ * @nhead_offs: offset of LPT head
+ * @ltab_lnum: LEB number of LPT's own lprops table
+ * @ltab_offs: offset of LPT's own lprops table
+ * @lsave_lnum: LEB number of LPT's save table (big model only)
+ * @lsave_offs: offset of LPT's save table (big model only)
+ * @lscan_lnum: LEB number of last LPT scan
+ * @empty_lebs: number of empty logical eraseblocks
+ * @idx_lebs: number of indexing logical eraseblocks
+ * @leb_cnt: count of LEBs used by file-system
+ * @padding: reserved for future, zeroes
+ */
+struct ubifs_mst_node {
+ struct ubifs_ch ch;
+ __le64 highest_inum;
+ __le64 cmt_no;
+ __le32 flags;
+ __le32 log_lnum;
+ __le32 root_lnum;
+ __le32 root_offs;
+ __le32 root_len;
+ __le32 gc_lnum;
+ __le32 ihead_lnum;
+ __le32 ihead_offs;
+ __le64 index_size;
+ __le64 total_free;
+ __le64 total_dirty;
+ __le64 total_used;
+ __le64 total_dead;
+ __le64 total_dark;
+ __le32 lpt_lnum;
+ __le32 lpt_offs;
+ __le32 nhead_lnum;
+ __le32 nhead_offs;
+ __le32 ltab_lnum;
+ __le32 ltab_offs;
+ __le32 lsave_lnum;
+ __le32 lsave_offs;
+ __le32 lscan_lnum;
+ __le32 empty_lebs;
+ __le32 idx_lebs;
+ __le32 leb_cnt;
+ __u8 padding[344];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_ref_node - logical eraseblock reference node.
+ * @ch: common header
+ * @lnum: the referred logical eraseblock number
+ * @offs: start offset in the referred LEB
+ * @jhead: journal head number
+ * @padding: reserved for future, zeroes
+ */
+struct ubifs_ref_node {
+ struct ubifs_ch ch;
+ __le32 lnum;
+ __le32 offs;
+ __le32 jhead;
+ __u8 padding[28];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_branch - key/reference/length branch
+ * @lnum: LEB number of the target node
+ * @offs: offset within @lnum
+ * @len: target node length
+ * @key: key
+ */
+struct ubifs_branch {
+ __le32 lnum;
+ __le32 offs;
+ __le32 len;
+ __u8 key[];
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_idx_node - indexing node.
+ * @ch: common header
+ * @child_cnt: number of child index nodes
+ * @level: tree level
+ * @branches: LEB number / offset / length / key branches
+ */
+struct ubifs_idx_node {
+ struct ubifs_ch ch;
+ __le16 child_cnt;
+ __le16 level;
+ __u8 branches[];
+} __attribute__ ((packed));
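+
+/*
+ * Illustrative sketch only (not built): @branches is a flexible array of
+ * 'struct ubifs_branch' entries, each followed by a key of the mount-time key
+ * length, so entry @i sits at a stride of UBIFS_BRANCH_SZ + key length bytes.
+ * The key_len parameter stands in for the in-memory @c->key_len and is an
+ * assumption; the function name is hypothetical.
+ */
+#if 0
+static struct ubifs_branch *example_idx_branch(struct ubifs_idx_node *idx,
+					       int key_len, int i)
+{
+	return (struct ubifs_branch *)
+		((void *)idx->branches + (UBIFS_BRANCH_SZ + key_len) * i);
+}
+#endif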
+
+/**
+ * struct ubifs_cs_node - commit start node.
+ * @ch: common header
+ * @cmt_no: commit number
+ */
+struct ubifs_cs_node {
+ struct ubifs_ch ch;
+ __le64 cmt_no;
+} __attribute__ ((packed));
+
+/**
+ * struct ubifs_orph_node - orphan node.
+ * @ch: common header
+ * @cmt_no: commit number (also top bit is set on the last node of the commit)
+ * @inos: inode numbers of orphans
+ */
+struct ubifs_orph_node {
+ struct ubifs_ch ch;
+ __le64 cmt_no;
+ __le64 inos[];
+} __attribute__ ((packed));
+
+#endif /* __UBIFS_MEDIA_H__ */
diff --git a/qemu/roms/u-boot/fs/ubifs/ubifs.c b/qemu/roms/u-boot/fs/ubifs/ubifs.c
new file mode 100644
index 000000000..273c0a963
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/ubifs.c
@@ -0,0 +1,751 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation.
+ *
+ * (C) Copyright 2008-2010
+ * Stefan Roese, DENX Software Engineering, sr@denx.de.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+#include "ubifs.h"
+#include <u-boot/zlib.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* compress.c */
+
+/*
+ * We need a wrapper for zunzip() because the parameters are
+ * incompatible with the lzo decompressor.
+ */
+static int gzip_decompress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len)
+{
+ return zunzip(out, *out_len, (unsigned char *)in,
+ (unsigned long *)out_len, 0, 0);
+}
+
+/* Fake description object for the "none" compressor */
+static struct ubifs_compressor none_compr = {
+ .compr_type = UBIFS_COMPR_NONE,
+ .name = "no compression",
+ .capi_name = "",
+ .decompress = NULL,
+};
+
+static struct ubifs_compressor lzo_compr = {
+ .compr_type = UBIFS_COMPR_LZO,
+ .name = "LZO",
+ .capi_name = "lzo",
+ .decompress = lzo1x_decompress_safe,
+};
+
+static struct ubifs_compressor zlib_compr = {
+ .compr_type = UBIFS_COMPR_ZLIB,
+ .name = "zlib",
+ .capi_name = "deflate",
+ .decompress = gzip_decompress,
+};
+
+/* All UBIFS compressors */
+struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
+
+/**
+ * ubifs_decompress - decompress data.
+ * @in_buf: data to decompress
+ * @in_len: length of the data to decompress
+ * @out_buf: output buffer where decompressed data should be stored
+ * @out_len: output length is returned here
+ * @compr_type: type of compression
+ *
+ * This function decompresses data from buffer @in_buf into buffer @out_buf.
+ * The length of the uncompressed data is returned in @out_len. This function
+ * returns %0 on success or a negative error code on failure.
+ */
+int ubifs_decompress(const void *in_buf, int in_len, void *out_buf,
+ int *out_len, int compr_type)
+{
+ int err;
+ struct ubifs_compressor *compr;
+
+ if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
+ ubifs_err("invalid compression type %d", compr_type);
+ return -EINVAL;
+ }
+
+ compr = ubifs_compressors[compr_type];
+
+ if (unlikely(!compr->capi_name)) {
+ ubifs_err("%s compression is not compiled in", compr->name);
+ return -EINVAL;
+ }
+
+ if (compr_type == UBIFS_COMPR_NONE) {
+ memcpy(out_buf, in_buf, in_len);
+ *out_len = in_len;
+ return 0;
+ }
+
+ err = compr->decompress(in_buf, in_len, out_buf, (size_t *)out_len);
+ if (err)
+ ubifs_err("cannot decompress %d bytes, compressor %s, "
+ "error %d", in_len, compr->name, err);
+
+ return err;
+}
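+
+/*
+ * Illustrative sketch only (not built): how a caller typically feeds the
+ * payload of a data node to ubifs_decompress() - compare read_block() further
+ * down in this file.  The buffer handling and function name are assumptions.
+ */
+#if 0
+static int example_unpack_data_node(struct ubifs_data_node *dn, void *out)
+{
+	int dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+	int out_len = UBIFS_BLOCK_SIZE;
+
+	return ubifs_decompress(&dn->data, dlen, out, &out_len,
+				le16_to_cpu(dn->compr_type));
+}
+#endif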
+
+/**
+ * compr_init - initialize a compressor.
+ * @compr: compressor description object
+ *
+ * This function initializes the requested compressor and returns zero in case
+ * of success or a negative error code in case of failure.
+ */
+static int __init compr_init(struct ubifs_compressor *compr)
+{
+ ubifs_compressors[compr->compr_type] = compr;
+
+#ifdef CONFIG_NEEDS_MANUAL_RELOC
+ ubifs_compressors[compr->compr_type]->name += gd->reloc_off;
+ ubifs_compressors[compr->compr_type]->capi_name += gd->reloc_off;
+ ubifs_compressors[compr->compr_type]->decompress += gd->reloc_off;
+#endif
+
+ return 0;
+}
+
+/**
+ * ubifs_compressors_init - initialize UBIFS compressors.
+ *
+ * This function initializes the compressors which were compiled in. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int __init ubifs_compressors_init(void)
+{
+ int err;
+
+ err = compr_init(&lzo_compr);
+ if (err)
+ return err;
+
+ err = compr_init(&zlib_compr);
+ if (err)
+ return err;
+
+ err = compr_init(&none_compr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/*
+ * ubifsls...
+ */
+
+static int filldir(struct ubifs_info *c, const char *name, int namlen,
+ u64 ino, unsigned int d_type)
+{
+ struct inode *inode;
+ char filetime[32];
+
+ switch (d_type) {
+ case UBIFS_ITYPE_REG:
+ printf("\t");
+ break;
+ case UBIFS_ITYPE_DIR:
+ printf("<DIR>\t");
+ break;
+ case UBIFS_ITYPE_LNK:
+ printf("<LNK>\t");
+ break;
+ default:
+ printf("other\t");
+ break;
+ }
+
+ inode = ubifs_iget(c->vfs_sb, ino);
+ if (IS_ERR(inode)) {
+ printf("%s: Error in ubifs_iget(), ino=%lld ret=%p!\n",
+ __func__, ino, inode);
+ return -1;
+ }
+ ctime_r((time_t *)&inode->i_mtime, filetime);
+ printf("%9lld %24.24s ", inode->i_size, filetime);
+ ubifs_iput(inode);
+
+ printf("%s\n", name);
+
+ return 0;
+}
+
+static int ubifs_printdir(struct file *file, void *dirent)
+{
+ int err, over = 0;
+ struct qstr nm;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent;
+ struct inode *dir = file->f_path.dentry->d_inode;
+ struct ubifs_info *c = dir->i_sb->s_fs_info;
+
+ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+
+ if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+ /*
+ * The directory was seek'ed to a senseless position or there
+ * are no more entries.
+ */
+ return 0;
+
+ if (file->f_pos == 1) {
+ /* Find the first entry in TNC and save it */
+ lowest_dent_key(c, &key, dir->i_ino);
+ nm.name = NULL;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+
+ file->f_pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+ dent = file->private_data;
+ if (!dent) {
+ /*
+ * The directory was seek'ed to and is now readdir'ed.
+ * Find the entry corresponding to @file->f_pos or the
+ * closest one.
+ */
+ dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+ nm.name = NULL;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+ file->f_pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ }
+
+ while (1) {
+ dbg_gen("feed '%s', ino %llu, new f_pos %#x",
+ dent->name, (unsigned long long)le64_to_cpu(dent->inum),
+ key_hash_flash(c, &dent->key));
+ ubifs_assert(le64_to_cpu(dent->ch.sqnum) > ubifs_inode(dir)->creat_sqnum);
+
+ nm.len = le16_to_cpu(dent->nlen);
+ over = filldir(c, (char *)dent->name, nm.len,
+ le64_to_cpu(dent->inum), dent->type);
+ if (over)
+ return 0;
+
+ /* Switch to the next entry */
+ key_read(c, &dent->key, &key);
+ nm.name = (char *)dent->name;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+
+ kfree(file->private_data);
+ file->f_pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ cond_resched();
+ }
+
+out:
+ if (err != -ENOENT) {
+ ubifs_err("cannot find next direntry, error %d", err);
+ return err;
+ }
+
+ kfree(file->private_data);
+ file->private_data = NULL;
+ file->f_pos = 2;
+ return 0;
+}
+
+static int ubifs_finddir(struct super_block *sb, char *dirname,
+ unsigned long root_inum, unsigned long *inum)
+{
+ int err;
+ struct qstr nm;
+ union ubifs_key key;
+ struct ubifs_dent_node *dent;
+ struct ubifs_info *c;
+ struct file *file;
+ struct dentry *dentry;
+ struct inode *dir;
+ int ret = 0;
+
+ file = kzalloc(sizeof(struct file), 0);
+ dentry = kzalloc(sizeof(struct dentry), 0);
+ dir = kzalloc(sizeof(struct inode), 0);
+ if (!file || !dentry || !dir) {
+ printf("%s: Error, no memory for malloc!\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dir->i_sb = sb;
+ file->f_path.dentry = dentry;
+ file->f_path.dentry->d_parent = dentry;
+ file->f_path.dentry->d_inode = dir;
+ file->f_path.dentry->d_inode->i_ino = root_inum;
+ c = sb->s_fs_info;
+
+ dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+
+ /* Find the first entry in TNC and save it */
+ lowest_dent_key(c, &key, dir->i_ino);
+ nm.name = NULL;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+
+ file->f_pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+
+ while (1) {
+ dbg_gen("feed '%s', ino %llu, new f_pos %#x",
+ dent->name, (unsigned long long)le64_to_cpu(dent->inum),
+ key_hash_flash(c, &dent->key));
+ ubifs_assert(le64_to_cpu(dent->ch.sqnum) > ubifs_inode(dir)->creat_sqnum);
+
+ nm.len = le16_to_cpu(dent->nlen);
+ if ((strncmp(dirname, (char *)dent->name, nm.len) == 0) &&
+ (strlen(dirname) == nm.len)) {
+ *inum = le64_to_cpu(dent->inum);
+ ret = 1;
+ goto out_free;
+ }
+
+ /* Switch to the next entry */
+ key_read(c, &dent->key, &key);
+ nm.name = (char *)dent->name;
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ err = PTR_ERR(dent);
+ goto out;
+ }
+
+ kfree(file->private_data);
+ file->f_pos = key_hash_flash(c, &dent->key);
+ file->private_data = dent;
+ cond_resched();
+ }
+
+out:
+ if (err != -ENOENT)
+ ubifs_err("cannot find next direntry, error %d", err);
+
+out_free:
+ if (file->private_data)
+ kfree(file->private_data);
+ if (file)
+ free(file);
+ if (dentry)
+ free(dentry);
+ if (dir)
+ free(dir);
+
+ return ret;
+}
+
+static unsigned long ubifs_findfile(struct super_block *sb, char *filename)
+{
+ int ret;
+ char *next;
+ char fpath[128];
+ char symlinkpath[128];
+ char *name = fpath;
+ unsigned long root_inum = 1;
+ unsigned long inum;
+ int symlink_count = 0; /* Don't allow symlink recursion */
+ char link_name[64];
+
+ strcpy(fpath, filename);
+
+ /* Remove all leading slashes */
+ while (*name == '/')
+ name++;
+
+ /*
+	 * Handle the root directory ('/')
+ */
+ inum = root_inum;
+ if (!name || *name == '\0')
+ return inum;
+
+ for (;;) {
+ struct inode *inode;
+ struct ubifs_inode *ui;
+
+ /* Extract the actual part from the pathname. */
+ next = strchr(name, '/');
+ if (next) {
+ /* Remove all leading slashes. */
+ while (*next == '/')
+ *(next++) = '\0';
+ }
+
+ ret = ubifs_finddir(sb, name, root_inum, &inum);
+ if (!ret)
+ return 0;
+ inode = ubifs_iget(sb, inum);
+
+ if (!inode)
+ return 0;
+ ui = ubifs_inode(inode);
+
+ if ((inode->i_mode & S_IFMT) == S_IFLNK) {
+ char buf[128];
+
+ /* We have some sort of symlink recursion, bail out */
+ if (symlink_count++ > 8) {
+ printf("Symlink recursion, aborting\n");
+ return 0;
+ }
+ memcpy(link_name, ui->data, ui->data_len);
+ link_name[ui->data_len] = '\0';
+
+ if (link_name[0] == '/') {
+ /* Absolute path, redo everything without
+ * the leading slash */
+ next = name = link_name + 1;
+ root_inum = 1;
+ continue;
+ }
+ /* Relative to cur dir */
+ sprintf(buf, "%s/%s",
+ link_name, next == NULL ? "" : next);
+ memcpy(symlinkpath, buf, sizeof(buf));
+ next = name = symlinkpath;
+ continue;
+ }
+
+ /*
+ * Check if directory with this name exists
+ */
+
+ /* Found the node! */
+ if (!next || *next == '\0')
+ return inum;
+
+ root_inum = inum;
+ name = next;
+ }
+
+ return 0;
+}
+
+int ubifs_ls(char *filename)
+{
+ struct ubifs_info *c = ubifs_sb->s_fs_info;
+ struct file *file;
+ struct dentry *dentry;
+ struct inode *dir;
+ void *dirent = NULL;
+ unsigned long inum;
+ int ret = 0;
+
+ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READONLY);
+ inum = ubifs_findfile(ubifs_sb, filename);
+ if (!inum) {
+ ret = -1;
+ goto out;
+ }
+
+ file = kzalloc(sizeof(struct file), 0);
+ dentry = kzalloc(sizeof(struct dentry), 0);
+ dir = kzalloc(sizeof(struct inode), 0);
+ if (!file || !dentry || !dir) {
+ printf("%s: Error, no memory for malloc!\n", __func__);
+ ret = -ENOMEM;
+ goto out_mem;
+ }
+
+ dir->i_sb = ubifs_sb;
+ file->f_path.dentry = dentry;
+ file->f_path.dentry->d_parent = dentry;
+ file->f_path.dentry->d_inode = dir;
+ file->f_path.dentry->d_inode->i_ino = inum;
+ file->f_pos = 1;
+ file->private_data = NULL;
+ ubifs_printdir(file, dirent);
+
+out_mem:
+ if (file)
+ free(file);
+ if (dentry)
+ free(dentry);
+ if (dir)
+ free(dir);
+
+out:
+ ubi_close_volume(c->ubi);
+ return ret;
+}
+
+/*
+ * ubifsload...
+ */
+
+/* file.c */
+
+static inline void *kmap(struct page *page)
+{
+ return page->addr;
+}
+
+static int read_block(struct inode *inode, void *addr, unsigned int block,
+ struct ubifs_data_node *dn)
+{
+ struct ubifs_info *c = inode->i_sb->s_fs_info;
+ int err, len, out_len;
+ union ubifs_key key;
+ unsigned int dlen;
+
+ data_key_init(c, &key, inode->i_ino, block);
+ err = ubifs_tnc_lookup(c, &key, dn);
+ if (err) {
+ if (err == -ENOENT)
+ /* Not found, so it must be a hole */
+ memset(addr, 0, UBIFS_BLOCK_SIZE);
+ return err;
+ }
+
+ ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
+
+ len = le32_to_cpu(dn->size);
+ if (len <= 0 || len > UBIFS_BLOCK_SIZE)
+ goto dump;
+
+ dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
+ out_len = UBIFS_BLOCK_SIZE;
+ err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
+ le16_to_cpu(dn->compr_type));
+ if (err || len != out_len)
+ goto dump;
+
+ /*
+ * Data length can be less than a full block, even for blocks that are
+ * not the last in the file (e.g., as a result of making a hole and
+ * appending data). Ensure that the remainder is zeroed out.
+ */
+ if (len < UBIFS_BLOCK_SIZE)
+ memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+
+ return 0;
+
+dump:
+ ubifs_err("bad data node (block %u, inode %lu)",
+ block, inode->i_ino);
+ dbg_dump_node(c, dn);
+ return -EINVAL;
+}
+
+static int do_readpage(struct ubifs_info *c, struct inode *inode,
+ struct page *page, int last_block_size)
+{
+ void *addr;
+ int err = 0, i;
+ unsigned int block, beyond;
+ struct ubifs_data_node *dn;
+ loff_t i_size = inode->i_size;
+
+ dbg_gen("ino %lu, pg %lu, i_size %lld",
+ inode->i_ino, page->index, i_size);
+
+ addr = kmap(page);
+
+ block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+ beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
+ if (block >= beyond) {
+ /* Reading beyond inode */
+ memset(addr, 0, PAGE_CACHE_SIZE);
+ goto out;
+ }
+
+ dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
+ if (!dn)
+ return -ENOMEM;
+
+ i = 0;
+ while (1) {
+ int ret;
+
+ if (block >= beyond) {
+ /* Reading beyond inode */
+ err = -ENOENT;
+ memset(addr, 0, UBIFS_BLOCK_SIZE);
+ } else {
+ /*
+ * Reading last block? Make sure to not write beyond
+ * the requested size in the destination buffer.
+ */
+ if (((block + 1) == beyond) || last_block_size) {
+ void *buff;
+ int dlen;
+
+ /*
+ * We need to buffer the data locally for the
+ * last block. This is to not pad the
+ * destination area to a multiple of
+ * UBIFS_BLOCK_SIZE.
+ */
+ buff = malloc(UBIFS_BLOCK_SIZE);
+ if (!buff) {
+ printf("%s: Error, malloc fails!\n",
+ __func__);
+ err = -ENOMEM;
+ break;
+ }
+
+ /* Read block-size into temp buffer */
+ ret = read_block(inode, buff, block, dn);
+ if (ret) {
+ err = ret;
+ if (err != -ENOENT) {
+ free(buff);
+ break;
+ }
+ }
+
+ if (last_block_size)
+ dlen = last_block_size;
+ else
+ dlen = le32_to_cpu(dn->size);
+
+ /* Now copy required size back to dest */
+ memcpy(addr, buff, dlen);
+
+ free(buff);
+ } else {
+ ret = read_block(inode, addr, block, dn);
+ if (ret) {
+ err = ret;
+ if (err != -ENOENT)
+ break;
+ }
+ }
+ }
+ if (++i >= UBIFS_BLOCKS_PER_PAGE)
+ break;
+ block += 1;
+ addr += UBIFS_BLOCK_SIZE;
+ }
+ if (err) {
+ if (err == -ENOENT) {
+ /* Not found, so it must be a hole */
+ dbg_gen("hole");
+ goto out_free;
+ }
+ ubifs_err("cannot read page %lu of inode %lu, error %d",
+ page->index, inode->i_ino, err);
+ goto error;
+ }
+
+out_free:
+ kfree(dn);
+out:
+ return 0;
+
+error:
+ kfree(dn);
+ return err;
+}
+
+int ubifs_load(char *filename, u32 addr, u32 size)
+{
+ struct ubifs_info *c = ubifs_sb->s_fs_info;
+ unsigned long inum;
+ struct inode *inode;
+ struct page page;
+ int err = 0;
+ int i;
+ int count;
+ int last_block_size = 0;
+
+ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READONLY);
+ /* ubifs_findfile will resolve symlinks, so we know that we get
+ * the real file here */
+ inum = ubifs_findfile(ubifs_sb, filename);
+ if (!inum) {
+ err = -1;
+ goto out;
+ }
+
+ /*
+ * Read file inode
+ */
+ inode = ubifs_iget(ubifs_sb, inum);
+ if (IS_ERR(inode)) {
+ printf("%s: Error reading inode %ld!\n", __func__, inum);
+ err = PTR_ERR(inode);
+ goto out;
+ }
+
+ /*
+	 * If no size was specified, or if the size is bigger than the file
+	 * size, set size to the file size
+ */
+ if ((size == 0) || (size > inode->i_size))
+ size = inode->i_size;
+
+ count = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
+ printf("Loading file '%s' to addr 0x%08x with size %d (0x%08x)...\n",
+ filename, addr, size, size);
+
+ page.addr = (void *)addr;
+ page.index = 0;
+ page.inode = inode;
+ for (i = 0; i < count; i++) {
+ /*
+ * Make sure to not read beyond the requested size
+ */
+ if (((i + 1) == count) && (size < inode->i_size))
+ last_block_size = size - (i * PAGE_SIZE);
+
+ err = do_readpage(c, inode, &page, last_block_size);
+ if (err)
+ break;
+
+ page.addr += PAGE_SIZE;
+ page.index++;
+ }
+
+ if (err)
+ printf("Error reading file '%s'\n", filename);
+ else {
+ setenv_hex("filesize", size);
+ printf("Done\n");
+ }
+
+ ubifs_iput(inode);
+
+out:
+ ubi_close_volume(c->ubi);
+ return err;
+}
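+
+/*
+ * Illustrative sketch only (not built): how the exported helpers above are
+ * typically driven from U-Boot command code, assuming the UBI volume has
+ * already been attached and mounted (e.g. via ubifs_mount()).  The load
+ * address and paths are placeholders.
+ */
+#if 0
+static void example_ls_and_load(void)
+{
+	if (ubifs_ls("/boot"))
+		printf("listing /boot failed\n");
+	if (ubifs_load("/boot/uImage", 0x800000, 0))
+		printf("loading /boot/uImage failed\n");
+}
+#endif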
diff --git a/qemu/roms/u-boot/fs/ubifs/ubifs.h b/qemu/roms/u-boot/fs/ubifs/ubifs.h
new file mode 100644
index 000000000..221320157
--- /dev/null
+++ b/qemu/roms/u-boot/fs/ubifs/ubifs.h
@@ -0,0 +1,2154 @@
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2006-2008 Nokia Corporation
+ *
+ * (C) Copyright 2008-2009
+ * Stefan Roese, DENX Software Engineering, sr@denx.de.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Adrian Hunter
+ */
+
+#ifndef __UBIFS_H__
+#define __UBIFS_H__
+
+#if 0 /* Enable for debugging output */
+#define CONFIG_UBIFS_FS_DEBUG
+#define CONFIG_UBIFS_FS_DEBUG_MSG_LVL 3
+#endif
+
+#include <ubi_uboot.h>
+#include <linux/ctype.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include "ubifs-media.h"
+
+struct dentry;
+struct file;
+struct iattr;
+struct kstat;
+struct vfsmount;
+
+extern struct super_block *ubifs_sb;
+
+extern unsigned int ubifs_msg_flags;
+extern unsigned int ubifs_chk_flags;
+extern unsigned int ubifs_tst_flags;
+
+#define pgoff_t unsigned long
+
+/*
+ * We "simulate" the Linux page struct in a much simpler form here
+ */
+struct page {
+ pgoff_t index;
+ void *addr;
+ struct inode *inode;
+};
+
+void iput(struct inode *inode);
+
+/*
+ * The atomic operations are used for budgeting etc., which is not
+ * needed for the read-only U-Boot implementation:
+ */
+#define atomic_long_inc(a)
+#define atomic_long_dec(a)
+#define atomic_long_sub(a, b)
+
+/* linux/include/time.h */
+
+struct timespec {
+ time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
+/* linux/include/dcache.h */
+
+/*
+ * "quick string" -- eases parameter passing, but more importantly
+ * saves "metadata" about the string (ie length and the hash).
+ *
+ * hash comes first so it snuggles against d_parent in the
+ * dentry.
+ */
+struct qstr {
+ unsigned int hash;
+ unsigned int len;
+ const char *name;
+};
+
+struct inode {
+ struct hlist_node i_hash;
+ struct list_head i_list;
+ struct list_head i_sb_list;
+ struct list_head i_dentry;
+ unsigned long i_ino;
+ unsigned int i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ dev_t i_rdev;
+ u64 i_version;
+ loff_t i_size;
+#ifdef __NEED_I_SIZE_ORDERED
+ seqcount_t i_size_seqcount;
+#endif
+ struct timespec i_atime;
+ struct timespec i_mtime;
+ struct timespec i_ctime;
+ unsigned int i_blkbits;
+ unsigned short i_bytes;
+ umode_t i_mode;
+ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
+ struct mutex i_mutex;
+ struct rw_semaphore i_alloc_sem;
+ const struct inode_operations *i_op;
+ const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ struct super_block *i_sb;
+ struct file_lock *i_flock;
+#ifdef CONFIG_QUOTA
+ struct dquot *i_dquot[MAXQUOTAS];
+#endif
+ struct list_head i_devices;
+ int i_cindex;
+
+ __u32 i_generation;
+
+#ifdef CONFIG_DNOTIFY
+ unsigned long i_dnotify_mask; /* Directory notify events */
+ struct dnotify_struct *i_dnotify; /* for directory notifications */
+#endif
+
+#ifdef CONFIG_INOTIFY
+ struct list_head inotify_watches; /* watches on this inode */
+ struct mutex inotify_mutex; /* protects the watches list */
+#endif
+
+ unsigned long i_state;
+ unsigned long dirtied_when; /* jiffies of first dirtying */
+
+ unsigned int i_flags;
+
+#ifdef CONFIG_SECURITY
+ void *i_security;
+#endif
+ void *i_private; /* fs or device private pointer */
+};
+
+struct super_block {
+ struct list_head s_list; /* Keep this first */
+ dev_t s_dev; /* search index; _not_ kdev_t */
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_dirt;
+ unsigned long long s_maxbytes; /* Max file size */
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ struct mutex s_lock;
+ int s_count;
+ int s_syncing;
+ int s_need_sync_fs;
+#ifdef CONFIG_SECURITY
+ void *s_security;
+#endif
+ struct xattr_handler **s_xattr;
+
+ struct list_head s_inodes; /* all inodes */
+ struct list_head s_dirty; /* dirty inodes */
+ struct list_head s_io; /* parked for writeback */
+ struct list_head s_more_io; /* parked for more writeback */
+ struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct list_head s_files;
+ /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
+ struct list_head s_dentry_lru; /* unused dentry lru */
+ int s_nr_dentry_unused; /* # of dentry on lru */
+
+ struct block_device *s_bdev;
+ struct mtd_info *s_mtd;
+ struct list_head s_instances;
+
+ int s_frozen;
+ wait_queue_head_t s_wait_unfrozen;
+
+ char s_id[32]; /* Informational name */
+
+ void *s_fs_info; /* Filesystem private info */
+
+ /*
+ * The next field is for VFS *only*. No filesystems have any business
+ * even looking at it. You had been warned.
+ */
+ struct mutex s_vfs_rename_mutex; /* Kludge */
+
+ /* Granularity of c/m/atime in ns.
+ Cannot be worse than a second */
+ u32 s_time_gran;
+
+ /*
+ * Filesystem subtype. If non-empty the filesystem type field
+ * in /proc/mounts will be "type.subtype"
+ */
+ char *s_subtype;
+
+ /*
+ * Saved mount options for lazy filesystems using
+ * generic_show_options()
+ */
+ char *s_options;
+};
+
+struct file_system_type {
+ const char *name;
+ int fs_flags;
+ int (*get_sb) (struct file_system_type *, int,
+ const char *, void *, struct vfsmount *);
+ void (*kill_sb) (struct super_block *);
+ struct module *owner;
+ struct file_system_type * next;
+ struct list_head fs_supers;
+};
+
+struct vfsmount {
+ struct list_head mnt_hash;
+ struct vfsmount *mnt_parent; /* fs we are mounted on */
+ struct dentry *mnt_mountpoint; /* dentry of mountpoint */
+ struct dentry *mnt_root; /* root of the mounted tree */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ struct list_head mnt_mounts; /* list of children, anchored here */
+ struct list_head mnt_child; /* and going through their mnt_child */
+ int mnt_flags;
+ /* 4 bytes hole on 64bits arches */
+ const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
+ struct list_head mnt_list;
+ struct list_head mnt_expire; /* link in fs-specific expiry list */
+ struct list_head mnt_share; /* circular list of shared mounts */
+ struct list_head mnt_slave_list;/* list of slave mounts */
+ struct list_head mnt_slave; /* slave list entry */
+ struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */
+ struct mnt_namespace *mnt_ns; /* containing namespace */
+ int mnt_id; /* mount identifier */
+ int mnt_group_id; /* peer group identifier */
+ /*
+ * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount
+	 * to keep these frequently modified fields in a separate cache line
+	 * (so that reads of mnt_flags won't ping-pong on SMP machines)
+ */
+ int mnt_expiry_mark; /* true if marked for expiry */
+ int mnt_pinned;
+ int mnt_ghosts;
+ /*
+ * This value is not stable unless all of the mnt_writers[] spinlocks
+ * are held, and all mnt_writer[]s on this mount have 0 as their ->count
+ */
+};
+
+struct path {
+ struct vfsmount *mnt;
+ struct dentry *dentry;
+};
+
+struct file {
+ struct path f_path;
+#define f_dentry f_path.dentry
+#define f_vfsmnt f_path.mnt
+ const struct file_operations *f_op;
+ unsigned int f_flags;
+ loff_t f_pos;
+ unsigned int f_uid, f_gid;
+
+ u64 f_version;
+#ifdef CONFIG_SECURITY
+ void *f_security;
+#endif
+ /* needed for tty driver, and maybe others */
+ void *private_data;
+
+#ifdef CONFIG_EPOLL
+ /* Used by fs/eventpoll.c to link all the hooks to this file */
+ struct list_head f_ep_links;
+ spinlock_t f_ep_lock;
+#endif /* #ifdef CONFIG_EPOLL */
+#ifdef CONFIG_DEBUG_WRITECOUNT
+ unsigned long f_mnt_write_state;
+#endif
+};
+
+/*
+ * get_seconds() is not really needed in the read-only implementation
+ */
+#define get_seconds() 0
+
+/* 4k page size */
+#define PAGE_CACHE_SHIFT 12
+#define PAGE_CACHE_SIZE (1 << PAGE_CACHE_SHIFT)
+
+/* Page cache limit. The filesystems should put that into their s_maxbytes
+ limits, otherwise bad things can happen in VM. */
+#if BITS_PER_LONG==32
+#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#elif BITS_PER_LONG==64
+#define MAX_LFS_FILESIZE 0x7fffffffffffffffUL
+#endif
+
+#define INT_MAX ((int)(~0U>>1))
+#define INT_MIN (-INT_MAX - 1)
+#define LLONG_MAX ((long long)(~0ULL>>1))
+
+/*
+ * These are the fs-independent mount-flags: up to 32 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define MS_NODIRATIME 2048 /* Do not update directory access times */
+#define MS_BIND 4096
+#define MS_MOVE 8192
+#define MS_REC 16384
+#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
+ MS_VERBOSE is deprecated. */
+#define MS_SILENT 32768
+#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
+#define MS_UNBINDABLE (1<<17) /* change to unbindable */
+#define MS_PRIVATE (1<<18) /* change to private */
+#define MS_SLAVE (1<<19) /* change to slave */
+#define MS_SHARED (1<<20) /* change to shared */
+#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
+#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+#define MS_ACTIVE (1<<30)
+#define MS_NOUSER (1<<31)
+
+#define I_NEW 8
+
+/* Inode flags - they have nothing to do with superblock flags now */
+
+#define S_SYNC 1 /* Writes are synced at once */
+#define S_NOATIME 2 /* Do not update access times */
+#define S_APPEND 4 /* Append-only file */
+#define S_IMMUTABLE 8 /* Immutable file */
+#define S_DEAD 16 /* removed, but still open directory */
+#define S_NOQUOTA 32 /* Inode is not counted to quota */
+#define S_DIRSYNC 64 /* Directory modifications are synchronous */
+#define S_NOCMTIME 128 /* Do not update file c/mtime */
+#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE 512 /* Inode is fs-internal */
+
+/* include/linux/stat.h */
+
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+
+/* include/linux/fs.h */
+
+/*
+ * File types
+ *
+ * NOTE! These match bits 12..15 of stat.st_mode
+ * (ie "(i_mode >> 12) & 15").
+ */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+#define I_DIRTY_SYNC 1
+#define I_DIRTY_DATASYNC 2
+#define I_DIRTY_PAGES 4
+#define I_NEW 8
+#define I_WILL_FREE 16
+#define I_FREEING 32
+#define I_CLEAR 64
+#define __I_LOCK 7
+#define I_LOCK (1 << __I_LOCK)
+#define __I_SYNC 8
+#define I_SYNC (1 << __I_SYNC)
+
+#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+
+/* linux/include/dcache.h */
+
+#define DNAME_INLINE_LEN_MIN 36
+
+struct dentry {
+ unsigned int d_flags; /* protected by d_lock */
+ spinlock_t d_lock; /* per dentry lock */
+ struct inode *d_inode; /* Where the name belongs to - NULL is
+ * negative */
+ /*
+ * The next three fields are touched by __d_lookup. Place them here
+ * so they all fit in a cache line.
+ */
+ struct hlist_node d_hash; /* lookup hash list */
+ struct dentry *d_parent; /* parent directory */
+ struct qstr d_name;
+
+ struct list_head d_lru; /* LRU list */
+ /*
+ * d_child and d_rcu can share memory
+ */
+ struct list_head d_subdirs; /* our children */
+ struct list_head d_alias; /* inode alias list */
+ unsigned long d_time; /* used by d_revalidate */
+ struct super_block *d_sb; /* The root of the dentry tree */
+ void *d_fsdata; /* fs-specific data */
+#ifdef CONFIG_PROFILING
+ struct dcookie_struct *d_cookie; /* cookie, if any */
+#endif
+ int d_mounted;
+ unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
+};
+
+static inline ino_t parent_ino(struct dentry *dentry)
+{
+ ino_t res;
+
+ spin_lock(&dentry->d_lock);
+ res = dentry->d_parent->d_inode->i_ino;
+ spin_unlock(&dentry->d_lock);
+ return res;
+}
+
+/* debug.c */
+
+#define DEFINE_SPINLOCK(...)
+#define module_param_named(...)
+
+/* misc.h */
+#define mutex_lock_nested(...)
+#define mutex_unlock_nested(...)
+#define mutex_is_locked(...) 0
+
+/* Version of this UBIFS implementation */
+#define UBIFS_VERSION 1
+
+/* Normal UBIFS messages */
+#ifdef CONFIG_UBIFS_SILENCE_MSG
+#define ubifs_msg(fmt, ...)
+#else
+#define ubifs_msg(fmt, ...) \
+ printk(KERN_NOTICE "UBIFS: " fmt "\n", ##__VA_ARGS__)
+#endif
+/* UBIFS error messages */
+#define ubifs_err(fmt, ...) \
+ printk(KERN_ERR "UBIFS error (pid %d): %s: " fmt "\n", 0, \
+ __func__, ##__VA_ARGS__)
+/* UBIFS warning messages */
+#define ubifs_warn(fmt, ...) \
+ printk(KERN_WARNING "UBIFS warning (pid %d): %s: " fmt "\n", \
+ 0, __func__, ##__VA_ARGS__)
+
+/* UBIFS file system VFS magic number */
+#define UBIFS_SUPER_MAGIC 0x24051905
+
+/* Number of UBIFS blocks per VFS page */
+#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE)
+#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT)
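+
+/*
+ * Worked example (illustrative): with the 4 KiB PAGE_CACHE_SIZE defined above
+ * and the 4 KiB UBIFS block size from ubifs-media.h, UBIFS_BLOCKS_PER_PAGE
+ * evaluates to 1, so do_readpage() in ubifs.c handles one block per simulated
+ * page.
+ */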
+
+/* "File system end of life" sequence number watermark */
+#define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL
+#define SQNUM_WATERMARK 0xFFFFFFFFFF000000ULL
+
+/*
+ * Minimum amount of LEBs reserved for the index. At present the index needs at
+ * least 2 LEBs: one for the index head and one for the in-the-gaps method
+ * (which currently does not cater for the index head and so excludes it from
+ * consideration).
+ */
+#define MIN_INDEX_LEBS 2
+
+/* Minimum amount of data UBIFS writes to the flash */
+#define MIN_WRITE_SZ (UBIFS_DATA_NODE_SZ + 8)
+
+/*
+ * Currently we do not support inode number overlapping and re-use, so this
+ * watermark defines a dangerous inode number level. This should be fixed
+ * later, although it is difficult to exceed the current limit. Another option
+ * is to use 64-bit inode numbers, but this means more overhead.
+ */
+#define INUM_WARN_WATERMARK 0xFFF00000
+#define INUM_WATERMARK 0xFFFFFF00
+
+/* Largest key size supported in this implementation */
+#define CUR_MAX_KEY_LEN UBIFS_SK_LEN
+
+/* Maximum number of entries in each LPT (LEB category) heap */
+#define LPT_HEAP_SZ 256
+
+/*
+ * Background thread name pattern. The numbers are UBI device and volume
+ * numbers.
+ */
+#define BGT_NAME_PATTERN "ubifs_bgt%d_%d"
+
+/* Default write-buffer synchronization timeout (5 secs) */
+#define DEFAULT_WBUF_TIMEOUT (5 * HZ)
+
+/* Maximum possible inode number (only 32-bit inodes are supported now) */
+#define MAX_INUM 0xFFFFFFFF
+
+/* Number of non-data journal heads */
+#define NONDATA_JHEADS_CNT 2
+
+/* Garbage collector head */
+#define GCHD 0
+/* Base journal head number */
+#define BASEHD 1
+/* First "general purpose" journal head */
+#define DATAHD 2
+
+/* 'No change' value for 'ubifs_change_lp()' */
+#define LPROPS_NC 0x80000001
+
+/*
+ * There is no notion of truncation key because truncation nodes do not exist
+ * in TNC. However, when replaying, it is handy to introduce fake "truncation"
+ * keys for truncation nodes because the code becomes simpler. So we define
+ * %UBIFS_TRUN_KEY type.
+ */
+#define UBIFS_TRUN_KEY UBIFS_KEY_TYPES_CNT
+
+/*
+ * How much a directory entry/extended attribute entry adds to the parent/host
+ * inode.
+ */
+#define CALC_DENT_SIZE(name_len) ALIGN(UBIFS_DENT_NODE_SZ + (name_len) + 1, 8)
+
+/* How much an extended attribute adds to the host inode */
+#define CALC_XATTR_BYTES(data_len) ALIGN(UBIFS_INO_NODE_SZ + (data_len) + 1, 8)
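+
+/*
+ * Worked example (illustrative): a directory entry whose name is "ubifs1"
+ * (6 bytes) charges the parent inode CALC_DENT_SIZE(6), i.e.
+ * ALIGN(UBIFS_DENT_NODE_SZ + 6 + 1, 8) bytes - the "+ 1" accounts for the
+ * terminating '\0' stored in the dent node.
+ */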
+
+/*
+ * Znodes which were not touched for 'OLD_ZNODE_AGE' seconds are considered
+ * "old", and znodes which were touched within the last 'YOUNG_ZNODE_AGE'
+ * seconds are considered "young". This is used by the shrinker when selecting
+ * znodes to trim off.
+ */
+#define OLD_ZNODE_AGE 20
+#define YOUNG_ZNODE_AGE 5
+
+/*
+ * Some compressors, like LZO, may end up with more data than the input
+ * buffer. So UBIFS always allocates a larger output buffer, to be sure the
+ * compressor will not corrupt memory in case of worst-case compression.
+ */
+#define WORST_COMPR_FACTOR 2
+
+/* Maximum expected tree height for use by bottom_up_buf */
+#define BOTTOM_UP_HEIGHT 64
+
+/* Maximum number of data nodes to bulk-read */
+#define UBIFS_MAX_BULK_READ 32
+
+/*
+ * Lockdep classes for UBIFS inode @ui_mutex.
+ */
+enum {
+ WB_MUTEX_1 = 0,
+ WB_MUTEX_2 = 1,
+ WB_MUTEX_3 = 2,
+};
+
+/*
+ * Znode flags (actually, bit numbers which store the flags).
+ *
+ * DIRTY_ZNODE: znode is dirty
+ * COW_ZNODE: znode is being committed and a new instance of this znode has to
+ * be created before changing this znode
+ * OBSOLETE_ZNODE: znode is obsolete, which means it was deleted, but it is
+ * still in the commit list and the ongoing commit operation
+ * will commit it, and delete this znode after it is done
+ */
+enum {
+ DIRTY_ZNODE = 0,
+ COW_ZNODE = 1,
+ OBSOLETE_ZNODE = 2,
+};
+
+/*
+ * Commit states.
+ *
+ * COMMIT_RESTING: commit is not wanted
+ * COMMIT_BACKGROUND: background commit has been requested
+ * COMMIT_REQUIRED: commit is required
+ * COMMIT_RUNNING_BACKGROUND: background commit is running
+ * COMMIT_RUNNING_REQUIRED: commit is running and it is required
+ * COMMIT_BROKEN: commit failed
+ */
+enum {
+ COMMIT_RESTING = 0,
+ COMMIT_BACKGROUND,
+ COMMIT_REQUIRED,
+ COMMIT_RUNNING_BACKGROUND,
+ COMMIT_RUNNING_REQUIRED,
+ COMMIT_BROKEN,
+};
+
+/*
+ * 'ubifs_scan_a_node()' return values.
+ *
+ * SCANNED_GARBAGE: scanned garbage
+ * SCANNED_EMPTY_SPACE: scanned empty space
+ * SCANNED_A_NODE: scanned a valid node
+ * SCANNED_A_CORRUPT_NODE: scanned a corrupted node
+ * SCANNED_A_BAD_PAD_NODE: scanned a padding node with invalid pad length
+ *
+ * Greater than zero means: 'scanned that number of padding bytes'
+ */
+enum {
+ SCANNED_GARBAGE = 0,
+ SCANNED_EMPTY_SPACE = -1,
+ SCANNED_A_NODE = -2,
+ SCANNED_A_CORRUPT_NODE = -3,
+ SCANNED_A_BAD_PAD_NODE = -4,
+};
+
+/*
+ * LPT cnode flag bits.
+ *
+ * DIRTY_CNODE: cnode is dirty
+ * COW_CNODE: cnode is being committed and must be copied before writing
+ * OBSOLETE_CNODE: cnode is being committed and has been copied (or deleted),
+ * so it can (and must) be freed when the commit is finished
+ */
+enum {
+ DIRTY_CNODE = 0,
+ COW_CNODE = 1,
+ OBSOLETE_CNODE = 2,
+};
+
+/*
+ * Dirty flag bits (lpt_drty_flgs) for LPT special nodes.
+ *
+ * LTAB_DIRTY: ltab node is dirty
+ * LSAVE_DIRTY: lsave node is dirty
+ */
+enum {
+ LTAB_DIRTY = 1,
+ LSAVE_DIRTY = 2,
+};
+
+/*
+ * Return codes used by the garbage collector.
+ * @LEB_FREED: the logical eraseblock was freed and is ready to use
+ * @LEB_FREED_IDX: indexing LEB was freed and can be used only after the commit
+ * @LEB_RETAINED: the logical eraseblock was freed and retained for GC purposes
+ */
+enum {
+ LEB_FREED,
+ LEB_FREED_IDX,
+ LEB_RETAINED,
+};
+
+/**
+ * struct ubifs_old_idx - index node obsoleted since last commit start.
+ * @rb: rb-tree node
+ * @lnum: LEB number of obsoleted index node
+ * @offs: offset of obsoleted index node
+ */
+struct ubifs_old_idx {
+ struct rb_node rb;
+ int lnum;
+ int offs;
+};
+
+/* The below union makes it easier to deal with keys */
+union ubifs_key {
+ uint8_t u8[CUR_MAX_KEY_LEN];
+ uint32_t u32[CUR_MAX_KEY_LEN/4];
+ uint64_t u64[CUR_MAX_KEY_LEN/8];
+ __le32 j32[CUR_MAX_KEY_LEN/4];
+};
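+
+/*
+ * Illustrative sketch only (not built): with the default simple key format,
+ * the first 32-bit word holds the inode number and the second word holds the
+ * key type in its top bits (plus a name hash or block number for dent/data
+ * keys).  UBIFS_INO_KEY and UBIFS_S_KEY_HASH_BITS are assumed to come from
+ * ubifs-media.h and key.h; the function name is hypothetical.
+ */
+#if 0
+static void example_ino_key(union ubifs_key *key, unsigned long inum)
+{
+	key->u32[0] = inum;
+	key->u32[1] = UBIFS_INO_KEY << UBIFS_S_KEY_HASH_BITS;
+}
+#endif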
+
+/**
+ * struct ubifs_scan_node - UBIFS scanned node information.
+ * @list: list of scanned nodes
+ * @key: key of node scanned (if it has one)
+ * @sqnum: sequence number
+ * @type: type of node scanned
+ * @offs: offset within LEB of node scanned
+ * @len: length of node scanned
+ * @node: raw node
+ */
+struct ubifs_scan_node {
+ struct list_head list;
+ union ubifs_key key;
+ unsigned long long sqnum;
+ int type;
+ int offs;
+ int len;
+ void *node;
+};
+
+/**
+ * struct ubifs_scan_leb - UBIFS scanned LEB information.
+ * @lnum: logical eraseblock number
+ * @nodes_cnt: number of nodes scanned
+ * @nodes: list of struct ubifs_scan_node
+ * @endpt: end point (and therefore the start of empty space)
+ * @ecc: read returned -EBADMSG
+ * @buf: buffer containing entire LEB scanned
+ */
+struct ubifs_scan_leb {
+ int lnum;
+ int nodes_cnt;
+ struct list_head nodes;
+ int endpt;
+ int ecc;
+ void *buf;
+};
+
+/**
+ * struct ubifs_gced_idx_leb - garbage-collected indexing LEB.
+ * @list: list
+ * @lnum: LEB number
+ * @unmap: OK to unmap this LEB
+ *
+ * This data structure is used to temporarily store garbage-collected indexing
+ * LEBs - they are not released immediately, but only after the next commit.
+ * This is needed to guarantee recoverability.
+ */
+struct ubifs_gced_idx_leb {
+ struct list_head list;
+ int lnum;
+ int unmap;
+};
+
+/**
+ * struct ubifs_inode - UBIFS in-memory inode description.
+ * @vfs_inode: VFS inode description object
+ * @creat_sqnum: sequence number at time of creation
+ * @del_cmtno: commit number corresponding to the time the inode was deleted,
+ * protected by @c->commit_sem;
+ * @xattr_size: summarized size of all extended attributes in bytes
+ * @xattr_cnt: count of extended attributes this inode has
+ * @xattr_names: sum of lengths of all extended attribute names belonging to
+ * this inode
+ * @dirty: non-zero if the inode is dirty
+ * @xattr: non-zero if this is an extended attribute inode
+ * @bulk_read: non-zero if bulk-read should be used
+ * @ui_mutex: serializes inode write-back with the rest of VFS operations,
+ * serializes "clean <-> dirty" state changes, serializes bulk-read,
+ * protects @dirty, @bulk_read, @ui_size, and @xattr_size
+ * @ui_lock: protects @synced_i_size
+ * @synced_i_size: synchronized size of inode, i.e. the value of inode size
+ * currently stored on the flash; used only for regular file
+ * inodes
+ * @ui_size: inode size used by UBIFS when writing to flash
+ * @flags: inode flags (@UBIFS_COMPR_FL, etc)
+ * @compr_type: default compression type used for this inode
+ * @last_page_read: page number of last page read (for bulk read)
+ * @read_in_a_row: number of consecutive pages read in a row (for bulk read)
+ * @data_len: length of the data attached to the inode
+ * @data: inode's data
+ *
+ * @ui_mutex exists for two main reasons. First, it prevents inodes from being
+ * written back while UBIFS is changing them in the middle of a VFS operation.
+ * This way UBIFS makes sure the inode fields are consistent. For example, in
+ * 'ubifs_rename()' we change 3 inodes simultaneously, and write-back must not
+ * write any of them before we have finished.
+ *
+ * The second reason is budgeting - UBIFS has to budget all operations. If an
+ * operation is going to mark an inode dirty, it has to allocate budget for
+ * this. It cannot just mark it dirty because there is no guarantee there will
+ * be enough flash space to write the inode back later. This means UBIFS has
+ * to have full control over inode "clean <-> dirty" transitions (and pages
+ * actually). But unfortunately, VFS marks inodes dirty in many places, and it
+ * does not ask the file-system if it is allowed to do so (there is a notifier,
+ * but it is not enough), i.e., there is no mechanism to synchronize with this.
+ * So UBIFS has its own inode dirty flag and its own mutex to serialize
+ * "clean <-> dirty" transitions.
+ *
+ * The @synced_i_size field is used to make sure we never write pages which are
+ * beyond last synchronized inode size. See 'ubifs_writepage()' for more
+ * information.
+ *
+ * The @ui_size is a "shadow" variable for @inode->i_size and UBIFS uses
+ * @ui_size instead of @inode->i_size. The reason for this is that UBIFS cannot
+ * make sure @inode->i_size is always changed under @ui_mutex, because it
+ * cannot call 'vmtruncate()' with @ui_mutex locked, because it would deadlock
+ * with 'ubifs_writepage()' (see file.c). All the other inode fields are
+ * changed under @ui_mutex, so they do not need "shadow" fields. Note, one
+ * could consider reworking the locking and basing it on "shadow" fields.
+ */
+struct ubifs_inode {
+ struct inode vfs_inode;
+ unsigned long long creat_sqnum;
+ unsigned long long del_cmtno;
+ unsigned int xattr_size;
+ unsigned int xattr_cnt;
+ unsigned int xattr_names;
+ unsigned int dirty:1;
+ unsigned int xattr:1;
+ unsigned int bulk_read:1;
+ unsigned int compr_type:2;
+ struct mutex ui_mutex;
+ spinlock_t ui_lock;
+ loff_t synced_i_size;
+ loff_t ui_size;
+ int flags;
+ pgoff_t last_page_read;
+ pgoff_t read_in_a_row;
+ int data_len;
+ void *data;
+};
+
+/**
+ * struct ubifs_unclean_leb - records a LEB recovered under read-only mode.
+ * @list: list
+ * @lnum: LEB number of recovered LEB
+ * @endpt: offset where recovery ended
+ *
+ * This structure records a LEB identified during recovery that needs to be
+ * cleaned but was not because UBIFS was mounted read-only. The information
+ * is used to clean the LEB when remounting to read-write mode.
+ */
+struct ubifs_unclean_leb {
+ struct list_head list;
+ int lnum;
+ int endpt;
+};
+
+/*
+ * LEB properties flags.
+ *
+ * LPROPS_UNCAT: not categorized
+ * LPROPS_DIRTY: dirty > free, dirty >= @c->dead_wm, not index
+ * LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sz and index
+ * LPROPS_FREE: free > 0, dirty < @c->dead_wm, not empty, not index
+ * LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs
+ * LPROPS_EMPTY: LEB is empty, not taken
+ * LPROPS_FREEABLE: free + dirty == leb_size, not index, not taken
+ * LPROPS_FRDI_IDX: free + dirty == leb_size and index, may be taken
+ * LPROPS_CAT_MASK: mask for the LEB categories above
+ * LPROPS_TAKEN: LEB was taken (this flag is not saved on the media)
+ * LPROPS_INDEX: LEB contains indexing nodes (this flag also exists on flash)
+ */
+enum {
+ LPROPS_UNCAT = 0,
+ LPROPS_DIRTY = 1,
+ LPROPS_DIRTY_IDX = 2,
+ LPROPS_FREE = 3,
+ LPROPS_HEAP_CNT = 3,
+ LPROPS_EMPTY = 4,
+ LPROPS_FREEABLE = 5,
+ LPROPS_FRDI_IDX = 6,
+ LPROPS_CAT_MASK = 15,
+ LPROPS_TAKEN = 16,
+ LPROPS_INDEX = 32,
+};
+
+/**
+ * struct ubifs_lprops - logical eraseblock properties.
+ * @free: amount of free space in bytes
+ * @dirty: amount of dirty space in bytes
+ * @flags: LEB properties flags (see above)
+ * @lnum: LEB number
+ * @list: list of same-category lprops (for LPROPS_EMPTY and LPROPS_FREEABLE)
+ * @hpos: heap position in heap of same-category lprops (other categories)
+ */
+struct ubifs_lprops {
+ int free;
+ int dirty;
+ int flags;
+ int lnum;
+ union {
+ struct list_head list;
+ int hpos;
+ };
+};
+
+/**
+ * struct ubifs_lpt_lprops - LPT logical eraseblock properties.
+ * @free: amount of free space in bytes
+ * @dirty: amount of dirty space in bytes
+ * @tgc: trivial GC flag (1 => unmap after commit end)
+ * @cmt: commit flag (1 => reserved for commit)
+ */
+struct ubifs_lpt_lprops {
+ int free;
+ int dirty;
+ unsigned tgc:1;
+ unsigned cmt:1;
+};
+
+/**
+ * struct ubifs_lp_stats - statistics of eraseblocks in the main area.
+ * @empty_lebs: number of empty LEBs
+ * @taken_empty_lebs: number of taken LEBs
+ * @idx_lebs: number of indexing LEBs
+ * @total_free: total free space in bytes (includes all LEBs)
+ * @total_dirty: total dirty space in bytes (includes all LEBs)
+ * @total_used: total used space in bytes (does not include index LEBs)
+ * @total_dead: total dead space in bytes (does not include index LEBs)
+ * @total_dark: total dark space in bytes (does not include index LEBs)
+ *
+ * The @taken_empty_lebs field counts the LEBs that are in the transient state
+ * of having been "taken" for use but not yet written to. @taken_empty_lebs is
+ * needed to account correctly for @gc_lnum, otherwise @empty_lebs could be
+ * used by itself (in which case 'unused_lebs' would be a better name). In the
+ * case of @gc_lnum, it is "taken" at mount time or whenever a LEB is retained
+ * by GC, but unlike other empty LEBs that are "taken", it may not be written
+ * straight away (i.e. before the next commit start or unmount), so either
+ * @gc_lnum must be accounted for specially, or (as is done here) it is simply
+ * counted under @taken_empty_lebs.
+ *
+ * @empty_lebs includes @taken_empty_lebs.
+ *
+ * @total_used, @total_dead and @total_dark fields do not account indexing
+ * LEBs.
+ */
+struct ubifs_lp_stats {
+ int empty_lebs;
+ int taken_empty_lebs;
+ int idx_lebs;
+ long long total_free;
+ long long total_dirty;
+ long long total_used;
+ long long total_dead;
+ long long total_dark;
+};
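
Since @empty_lebs includes @taken_empty_lebs, the number of empty LEBs that are actually available follows by subtraction; a minimal sketch (hypothetical helper, assuming only the structure above):

/* Hypothetical sketch, not taken from the UBIFS sources. */
static inline int example_untaken_empty_lebs(const struct ubifs_lp_stats *lst)
{
        return lst->empty_lebs - lst->taken_empty_lebs;
}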
+
+struct ubifs_nnode;
+
+/**
+ * struct ubifs_cnode - LEB Properties Tree common node.
+ * @parent: parent nnode
+ * @cnext: next cnode to commit
+ * @flags: flags (%DIRTY_LPT_NODE or %OBSOLETE_LPT_NODE)
+ * @iip: index in parent
+ * @level: level in the tree (zero for pnodes, greater than zero for nnodes)
+ * @num: node number
+ */
+struct ubifs_cnode {
+ struct ubifs_nnode *parent;
+ struct ubifs_cnode *cnext;
+ unsigned long flags;
+ int iip;
+ int level;
+ int num;
+};
+
+/**
+ * struct ubifs_pnode - LEB Properties Tree leaf node.
+ * @parent: parent nnode
+ * @cnext: next cnode to commit
+ * @flags: flags (%DIRTY_LPT_NODE or %OBSOLETE_LPT_NODE)
+ * @iip: index in parent
+ * @level: level in the tree (always zero for pnodes)
+ * @num: node number
+ * @lprops: LEB properties array
+ */
+struct ubifs_pnode {
+ struct ubifs_nnode *parent;
+ struct ubifs_cnode *cnext;
+ unsigned long flags;
+ int iip;
+ int level;
+ int num;
+ struct ubifs_lprops lprops[UBIFS_LPT_FANOUT];
+};
+
+/**
+ * struct ubifs_nbranch - LEB Properties Tree internal node branch.
+ * @lnum: LEB number of child
+ * @offs: offset of child
+ * @nnode: nnode child
+ * @pnode: pnode child
+ * @cnode: cnode child
+ */
+struct ubifs_nbranch {
+ int lnum;
+ int offs;
+ union {
+ struct ubifs_nnode *nnode;
+ struct ubifs_pnode *pnode;
+ struct ubifs_cnode *cnode;
+ };
+};
+
+/**
+ * struct ubifs_nnode - LEB Properties Tree internal node.
+ * @parent: parent nnode
+ * @cnext: next cnode to commit
+ * @flags: flags (%DIRTY_LPT_NODE or %OBSOLETE_LPT_NODE)
+ * @iip: index in parent
+ * @level: level in the tree (always greater than zero for nnodes)
+ * @num: node number
+ * @nbranch: branches to child nodes
+ */
+struct ubifs_nnode {
+ struct ubifs_nnode *parent;
+ struct ubifs_cnode *cnext;
+ unsigned long flags;
+ int iip;
+ int level;
+ int num;
+ struct ubifs_nbranch nbranch[UBIFS_LPT_FANOUT];
+};
+
+/**
+ * struct ubifs_lpt_heap - heap of categorized lprops.
+ * @arr: heap array
+ * @cnt: number in heap
+ * @max_cnt: maximum number allowed in heap
+ *
+ * There are %LPROPS_HEAP_CNT heaps.
+ */
+struct ubifs_lpt_heap {
+ struct ubifs_lprops **arr;
+ int cnt;
+ int max_cnt;
+};
+
+/*
+ * Return codes for LPT scan callback function.
+ *
+ * LPT_SCAN_CONTINUE: continue scanning
+ * LPT_SCAN_ADD: add the LEB properties scanned to the tree in memory
+ * LPT_SCAN_STOP: stop scanning
+ */
+enum {
+ LPT_SCAN_CONTINUE = 0,
+ LPT_SCAN_ADD = 1,
+ LPT_SCAN_STOP = 2,
+};
+
+struct ubifs_info;
+
+/* Callback used by the 'ubifs_lpt_scan_nolock()' function */
+typedef int (*ubifs_lpt_scan_callback)(struct ubifs_info *c,
+ const struct ubifs_lprops *lprops,
+ int in_tree, void *data);
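
A callback of this type drives 'ubifs_lpt_scan_nolock()' through the LPT_SCAN_* return codes above. A minimal sketch (hypothetical callback, assuming only the declarations in this header) that stops at the first LEB with at least *(int *)data bytes of free space:

/* Hypothetical sketch, not taken from the UBIFS sources. */
static int example_find_space_cb(struct ubifs_info *c,
                                 const struct ubifs_lprops *lprops,
                                 int in_tree, void *data)
{
        int min_free = *(int *)data;

        if (lprops->free >= min_free)
                return LPT_SCAN_STOP;           /* suitable LEB found */
        return LPT_SCAN_CONTINUE;               /* keep scanning */
}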
+
+/**
+ * struct ubifs_wbuf - UBIFS write-buffer.
+ * @c: UBIFS file-system description object
+ * @buf: write-buffer (of min. flash I/O unit size)
+ * @lnum: logical eraseblock number the write-buffer points to
+ * @offs: write-buffer offset in this logical eraseblock
+ * @avail: number of bytes available in the write-buffer
+ * @used: number of used bytes in the write-buffer
+ * @dtype: type of data stored in this LEB (%UBI_LONGTERM, %UBI_SHORTTERM,
+ * %UBI_UNKNOWN)
+ * @jhead: journal head the mutex belongs to (note, needed only to shut lockdep
+ * up by 'mutex_lock_nested()').
+ * @sync_callback: write-buffer synchronization callback
+ * @io_mutex: serializes write-buffer I/O
+ * @lock: serializes @buf, @lnum, @offs, @avail, @used, @next_ino and @inodes
+ * fields
+ * @timer: write-buffer timer
+ * @timeout: timer expire interval in jiffies
+ * @need_sync: it is set if its timer expired and needs sync
+ * @next_ino: index of the next free slot in the @inodes array
+ * @inodes: stores the inode numbers of the nodes which are in wbuf
+ *
+ * The write-buffer synchronization callback is called when the write-buffer is
+ * synchronized in order to notify how much space was wasted due to
+ * write-buffer padding and how much free space is left in the LEB.
+ *
+ * Note: the fields @buf, @lnum, @offs, @avail and @used can be read under
+ * spin-lock or mutex because they are written under both mutex and spin-lock.
+ * @buf is appended to under mutex but overwritten under both mutex and
+ * spin-lock. Thus the data between @buf and @buf + @used can be read under
+ * spinlock.
+ */
+struct ubifs_wbuf {
+ struct ubifs_info *c;
+ void *buf;
+ int lnum;
+ int offs;
+ int avail;
+ int used;
+ int dtype;
+ int jhead;
+ int (*sync_callback)(struct ubifs_info *c, int lnum, int free, int pad);
+ struct mutex io_mutex;
+ spinlock_t lock;
+ int timeout;
+ int need_sync;
+ int next_ino;
+ ino_t *inodes;
+};
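
To make the @sync_callback contract concrete: it is invoked when the write-buffer is synchronized and is told how many bytes were lost to padding and how much of the LEB is still free. A minimal sketch of a matching callback (hypothetical, assuming only the declarations above):

/* Hypothetical sketch, not taken from the UBIFS sources. */
static int example_wbuf_sync_cb(struct ubifs_info *c, int lnum, int free, int pad)
{
        /* a real callback would typically account @pad as dirty space and
         * @free as free space for LEB @lnum in the LEB properties */
        return 0;
}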
+
+/**
+ * struct ubifs_bud - bud logical eraseblock.
+ * @lnum: logical eraseblock number
+ * @start: where the (uncommitted) bud data starts
+ * @jhead: journal head number this bud belongs to
+ * @list: link in the list buds belonging to the same journal head
+ * @rb: link in the tree of all buds
+ */
+struct ubifs_bud {
+ int lnum;
+ int start;
+ int jhead;
+ struct list_head list;
+ struct rb_node rb;
+};
+
+/**
+ * struct ubifs_jhead - journal head.
+ * @wbuf: head's write-buffer
+ * @buds_list: list of bud LEBs belonging to this journal head
+ *
+ * Note, the @buds list is protected by the @c->buds_lock.
+ */
+struct ubifs_jhead {
+ struct ubifs_wbuf wbuf;
+ struct list_head buds_list;
+};
+
+/**
+ * struct ubifs_zbranch - key/coordinate/length branch stored in znodes.
+ * @key: key
+ * @znode: znode address in memory
+ * @lnum: LEB number of the target node (indexing node or data node)
+ * @offs: target node offset within @lnum
+ * @len: target node length
+ */
+struct ubifs_zbranch {
+ union ubifs_key key;
+ union {
+ struct ubifs_znode *znode;
+ void *leaf;
+ };
+ int lnum;
+ int offs;
+ int len;
+};
+
+/**
+ * struct ubifs_znode - in-memory representation of an indexing node.
+ * @parent: parent znode or NULL if it is the root
+ * @cnext: next znode to commit
+ * @flags: znode flags (%DIRTY_ZNODE, %COW_ZNODE or %OBSOLETE_ZNODE)
+ * @time: last access time (seconds)
+ * @level: level of the entry in the TNC tree
+ * @child_cnt: count of child znodes
+ * @iip: index in parent's zbranch array
+ * @alt: lower bound of key range has altered i.e. child inserted at slot 0
+ * @lnum: LEB number of the corresponding indexing node
+ * @offs: offset of the corresponding indexing node
+ * @len: length of the corresponding indexing node
+ * @zbranch: array of znode branches (@c->fanout elements)
+ */
+struct ubifs_znode {
+ struct ubifs_znode *parent;
+ struct ubifs_znode *cnext;
+ unsigned long flags;
+ unsigned long time;
+ int level;
+ int child_cnt;
+ int iip;
+ int alt;
+#ifdef CONFIG_UBIFS_FS_DEBUG
+ int lnum, offs, len;
+#endif
+ struct ubifs_zbranch zbranch[];
+};
+
+/**
+ * struct bu_info - bulk-read information.
+ * @key: first data node key
+ * @zbranch: zbranches of data nodes to bulk read
+ * @buf: buffer to read into
+ * @buf_len: buffer length
+ * @gc_seq: GC sequence number to detect races with GC
+ * @cnt: number of data nodes for bulk read
+ * @blk_cnt: number of data blocks including holes
+ * @eof: end of file reached
+ */
+struct bu_info {
+ union ubifs_key key;
+ struct ubifs_zbranch zbranch[UBIFS_MAX_BULK_READ];
+ void *buf;
+ int buf_len;
+ int gc_seq;
+ int cnt;
+ int blk_cnt;
+ int eof;
+};
+
+/**
+ * struct ubifs_node_range - node length range description data structure.
+ * @len: fixed node length
+ * @min_len: minimum possible node length
+ * @max_len: maximum possible node length
+ *
+ * If @max_len is %0, the node has fixed length @len.
+ */
+struct ubifs_node_range {
+ union {
+ int len;
+ int min_len;
+ };
+ int max_len;
+};
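
Per the rule that a @max_len of %0 means the node has the fixed length @len, a length check against a range entry could look like this minimal sketch (hypothetical helper):

/* Hypothetical sketch, not taken from the UBIFS sources. */
static inline int example_node_len_ok(const struct ubifs_node_range *r, int len)
{
        if (r->max_len == 0)
                return len == r->len;                           /* fixed-length node */
        return len >= r->min_len && len <= r->max_len;          /* variable-length node */
}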
+
+/**
+ * struct ubifs_compressor - UBIFS compressor description structure.
+ * @compr_type: compressor type (%UBIFS_COMPR_LZO, etc)
+ * @name: compressor name
+ * @capi_name: cryptoapi compressor name
+ * @decompress: decompression function
+ */
+struct ubifs_compressor {
+ int compr_type;
+ char *name;
+ char *capi_name;
+ int (*decompress)(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len);
+};
+
+/**
+ * struct ubifs_budget_req - budget requirements of an operation.
+ *
+ * @fast: non-zero if the budgeting should try to acquire budget quickly and
+ * should not try to call write-back
+ * @recalculate: non-zero if @idx_growth, @data_growth, and @dd_growth fields
+ * have to be re-calculated
+ * @new_page: non-zero if the operation adds a new page
+ * @dirtied_page: non-zero if the operation makes a page dirty
+ * @new_dent: non-zero if the operation adds a new directory entry
+ * @mod_dent: non-zero if the operation removes or modifies an existing
+ * directory entry
+ * @new_ino: non-zero if the operation adds a new inode
+ * @new_ino_d: how much data the newly created inode contains
+ * @dirtied_ino: how many inodes the operation makes dirty
+ * @dirtied_ino_d: how much data the dirtied inode contains
+ * @idx_growth: how much the index will supposedly grow
+ * @data_growth: how much new data the operation will supposedly add
+ * @dd_growth: how much data that makes other data dirty the operation will
+ * supposedly add
+ *
+ * @idx_growth, @data_growth and @dd_growth are not used in budget request. The
+ * budgeting subsystem caches index and data growth values there to avoid
+ * re-calculating them when the budget is released. However, if @idx_growth is
+ * %-1, it is calculated by the release function using other fields.
+ *
+ * An inode may contain at most 4KiB of data, which is why @new_ino_d is 13
+ * bits wide, while @dirtied_ino_d is 15 bits wide because up to 4 inodes may
+ * be made dirty by a re-name operation.
+ *
+ * Note, UBIFS aligns node lengths to an 8-byte boundary, so the requester has
+ * to make sure the amounts of inode data which contribute to the @new_ino_d
+ * and @dirtied_ino_d fields are aligned.
+ */
+struct ubifs_budget_req {
+ unsigned int fast:1;
+ unsigned int recalculate:1;
+#ifndef UBIFS_DEBUG
+ unsigned int new_page:1;
+ unsigned int dirtied_page:1;
+ unsigned int new_dent:1;
+ unsigned int mod_dent:1;
+ unsigned int new_ino:1;
+ unsigned int new_ino_d:13;
+ unsigned int dirtied_ino:4;
+ unsigned int dirtied_ino_d:15;
+#else
+ /* Not bit-fields to check for overflows */
+ unsigned int new_page;
+ unsigned int dirtied_page;
+ unsigned int new_dent;
+ unsigned int mod_dent;
+ unsigned int new_ino;
+ unsigned int new_ino_d;
+ unsigned int dirtied_ino;
+ unsigned int dirtied_ino_d;
+#endif
+ int idx_growth;
+ int data_growth;
+ int dd_growth;
+};
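
To make the width and alignment remarks above concrete, here is a minimal sketch of a request for an operation that dirties one page and one inode carrying 512 bytes of data (a multiple of 8, as required above); the helper name and values are hypothetical:

/* Hypothetical sketch, not taken from the UBIFS sources. */
static void example_fill_budget_req(struct ubifs_budget_req *req)
{
        *req = (struct ubifs_budget_req){
                .dirtied_page  = 1,
                .dirtied_ino   = 1,
                .dirtied_ino_d = 512,   /* inode data, kept 8-byte aligned */
        };
}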
+
+/**
+ * struct ubifs_orphan - stores the inode number of an orphan.
+ * @rb: rb-tree node of rb-tree of orphans sorted by inode number
+ * @list: list head of list of orphans in order added
+ * @new_list: list head of list of orphans added since the last commit
+ * @cnext: next orphan to commit
+ * @dnext: next orphan to delete
+ * @inum: inode number
+ * @new: %1 => added since the last commit, otherwise %0
+ */
+struct ubifs_orphan {
+ struct rb_node rb;
+ struct list_head list;
+ struct list_head new_list;
+ struct ubifs_orphan *cnext;
+ struct ubifs_orphan *dnext;
+ ino_t inum;
+ int new;
+};
+
+/**
+ * struct ubifs_mount_opts - UBIFS-specific mount options information.
+ * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast)
+ * @bulk_read: enable/disable bulk-reads (%0 default, %1 disable, %2 enable)
+ * @chk_data_crc: enable/disable CRC data checking when reading data nodes
+ * (%0 default, %1 disable, %2 enable)
+ * @override_compr: override default compressor (%0 - do not override and use
+ * superblock compressor, %1 - override and use compressor
+ * specified in @compr_type)
+ * @compr_type: compressor type to override the superblock compressor with
+ * (%UBIFS_COMPR_NONE, etc)
+ */
+struct ubifs_mount_opts {
+ unsigned int unmount_mode:2;
+ unsigned int bulk_read:2;
+ unsigned int chk_data_crc:2;
+ unsigned int override_compr:1;
+ unsigned int compr_type:2;
+};
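
The 2-bit option fields above are tri-state: %0 means "use the default", %1 "disabled", %2 "enabled". A minimal sketch of folding such a field into a plain boolean (hypothetical helper):

/* Hypothetical sketch, not taken from the UBIFS sources. */
static inline int example_opt_enabled(unsigned int opt, int enabled_by_default)
{
        if (opt == 0)
                return enabled_by_default;      /* option not given at mount time */
        return opt == 2;                        /* 1 = disable, 2 = enable */
}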
+
+struct ubifs_debug_info;
+
+/**
+ * struct ubifs_info - UBIFS file-system description data structure
+ * (per-superblock).
+ * @vfs_sb: VFS @struct super_block object
+ * @bdi: backing device info object to make VFS happy and disable read-ahead
+ *
+ * @highest_inum: highest used inode number
+ * @max_sqnum: current global sequence number
+ * @cmt_no: commit number of the last successfully completed commit, protected
+ * by @commit_sem
+ * @cnt_lock: protects @highest_inum and @max_sqnum counters
+ * @fmt_version: UBIFS on-flash format version
+ * @ro_compat_version: R/O compatibility version
+ * @uuid: UUID from super block
+ *
+ * @lhead_lnum: log head logical eraseblock number
+ * @lhead_offs: log head offset
+ * @ltail_lnum: log tail logical eraseblock number (offset is always 0)
+ * @log_mutex: protects the log, @lhead_lnum, @lhead_offs, @ltail_lnum, and
+ * @bud_bytes
+ * @min_log_bytes: minimum required number of bytes in the log
+ * @cmt_bud_bytes: used during commit to temporarily store the number of bytes
+ * in committed buds
+ *
+ * @buds: tree of all buds indexed by bud LEB number
+ * @bud_bytes: how many bytes of flash are used by buds
+ * @buds_lock: protects the @buds tree, @bud_bytes, and per-journal head bud
+ * lists
+ * @jhead_cnt: count of journal heads
+ * @jheads: journal heads (head zero is base head)
+ * @max_bud_bytes: maximum number of bytes allowed in buds
+ * @bg_bud_bytes: number of bud bytes when background commit is initiated
+ * @old_buds: buds to be released after commit ends
+ * @max_bud_cnt: maximum number of buds
+ *
+ * @commit_sem: synchronizes committer with other processes
+ * @cmt_state: commit state
+ * @cs_lock: commit state lock
+ * @cmt_wq: wait queue to sleep on if the log is full and a commit is running
+ *
+ * @big_lpt: flag that LPT is too big to write whole during commit
+ * @no_chk_data_crc: do not check CRCs when reading data nodes (except during
+ * recovery)
+ * @bulk_read: enable bulk-reads
+ * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc)
+ * @rw_incompat: the media is not R/W compatible
+ *
+ * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and
+ * @calc_idx_sz
+ * @zroot: zbranch which points to the root index node and znode
+ * @cnext: next znode to commit
+ * @enext: next znode to commit to empty space
+ * @gap_lebs: array of LEBs used by the in-gaps commit method
+ * @cbuf: commit buffer
+ * @ileb_buf: buffer for commit in-the-gaps method
+ * @ileb_len: length of data in ileb_buf
+ * @ihead_lnum: LEB number of index head
+ * @ihead_offs: offset of index head
+ * @ilebs: pre-allocated index LEBs
+ * @ileb_cnt: number of pre-allocated index LEBs
+ * @ileb_nxt: next pre-allocated index LEBs
+ * @old_idx: tree of index nodes obsoleted since the last commit start
+ * @bottom_up_buf: a buffer which is used by 'dirty_cow_bottom_up()' in tnc.c
+ *
+ * @mst_node: master node
+ * @mst_offs: offset of valid master node
+ * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ *
+ * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
+ *
+ * @log_lebs: number of logical eraseblocks in the log
+ * @log_bytes: log size in bytes
+ * @log_last: last LEB of the log
+ * @lpt_lebs: number of LEBs used for lprops table
+ * @lpt_first: first LEB of the lprops table area
+ * @lpt_last: last LEB of the lprops table area
+ * @orph_lebs: number of LEBs used for the orphan area
+ * @orph_first: first LEB of the orphan area
+ * @orph_last: last LEB of the orphan area
+ * @main_lebs: count of LEBs in the main area
+ * @main_first: first LEB of the main area
+ * @main_bytes: main area size in bytes
+ *
+ * @key_hash_type: type of the key hash
+ * @key_hash: direntry key hash function
+ * @key_fmt: key format
+ * @key_len: key length
+ * @fanout: fanout of the index tree (number of links per indexing node)
+ *
+ * @min_io_size: minimal input/output unit size
+ * @min_io_shift: number of bits in @min_io_size minus one
+ * @leb_size: logical eraseblock size in bytes
+ * @half_leb_size: half LEB size
+ * @leb_cnt: count of logical eraseblocks
+ * @max_leb_cnt: maximum count of logical eraseblocks
+ * @old_leb_cnt: count of logical eraseblocks before re-size
+ * @ro_media: the underlying UBI volume is read-only
+ *
+ * @dirty_pg_cnt: number of dirty pages (not used)
+ * @dirty_zn_cnt: number of dirty znodes
+ * @clean_zn_cnt: number of clean znodes
+ *
+ * @budg_idx_growth: amount of bytes budgeted for index growth
+ * @budg_data_growth: amount of bytes budgeted for cached data
+ * @budg_dd_growth: amount of bytes budgeted for cached data that will make
+ * other data dirty
+ * @budg_uncommitted_idx: amount of bytes which were budgeted for growth of the
+ * index but which still have to be taken into account
+ * because the index has not been committed so far
+ * @space_lock: protects @budg_idx_growth, @budg_data_growth, @budg_dd_growth,
+ * @budg_uncommitted_idx, @min_idx_lebs, @old_idx_sz, @lst,
+ * @nospace, and @nospace_rp
+ * @min_idx_lebs: minimum number of LEBs required for the index
+ * @old_idx_sz: size of index on flash
+ * @calc_idx_sz: temporary variable which is used to calculate new index size
+ * (contains accurate new index size at end of TNC commit start)
+ * @lst: lprops statistics
+ * @nospace: non-zero if the file-system does not have flash space (used as
+ * optimization)
+ * @nospace_rp: the same as @nospace, but additionally means that even reserved
+ * pool is full
+ *
+ * @page_budget: budget for a page
+ * @inode_budget: budget for an inode
+ * @dent_budget: budget for a directory entry
+ *
+ * @ref_node_alsz: size of the LEB reference node aligned to the min. flash
+ * I/O unit
+ * @mst_node_alsz: master node aligned size
+ * @min_idx_node_sz: minimum indexing node size, aligned to an 8-byte boundary
+ * @max_idx_node_sz: maximum indexing node size, aligned to an 8-byte boundary
+ * @max_inode_sz: maximum possible inode size in bytes
+ * @max_znode_sz: size of znode in bytes
+ *
+ * @leb_overhead: how many bytes are wasted in an LEB when it is filled with
+ * data nodes of maximum size - used in free space reporting
+ * @dead_wm: LEB dead space watermark
+ * @dark_wm: LEB dark space watermark
+ * @block_cnt: count of 4KiB blocks on the FS
+ *
+ * @ranges: UBIFS node length ranges
+ * @ubi: UBI volume descriptor
+ * @di: UBI device information
+ * @vi: UBI volume information
+ *
+ * @orph_tree: rb-tree of orphan inode numbers
+ * @orph_list: list of orphan inode numbers in order added
+ * @orph_new: list of orphan inode numbers added since last commit
+ * @orph_cnext: next orphan to commit
+ * @orph_dnext: next orphan to delete
+ * @orphan_lock: lock for orph_tree and orph_new
+ * @orph_buf: buffer for orphan nodes
+ * @new_orphans: number of orphans since last commit
+ * @cmt_orphans: number of orphans being committed
+ * @tot_orphans: number of orphans in the rb_tree
+ * @max_orphans: maximum number of orphans allowed
+ * @ohead_lnum: orphan head LEB number
+ * @ohead_offs: orphan head offset
+ * @no_orphs: non-zero if there are no orphans
+ *
+ * @bgt: UBIFS background thread
+ * @bgt_name: background thread name
+ * @need_bgt: if background thread should run
+ * @need_wbuf_sync: if write-buffers have to be synchronized
+ *
+ * @gc_lnum: LEB number used for garbage collection
+ * @sbuf: a buffer of LEB size used by GC and replay for scanning
+ * @idx_gc: list of index LEBs that have been garbage collected
+ * @idx_gc_cnt: number of elements on the idx_gc list
+ * @gc_seq: incremented for every non-index LEB garbage collected
+ * @gced_lnum: last non-index LEB that was garbage collected
+ *
+ * @infos_list: links all 'ubifs_info' objects
+ * @umount_mutex: serializes shrinker and un-mount
+ * @shrinker_run_no: shrinker run number
+ *
+ * @space_bits: number of bits needed to record free or dirty space
+ * @lpt_lnum_bits: number of bits needed to record a LEB number in the LPT
+ * @lpt_offs_bits: number of bits needed to record an offset in the LPT
+ * @lpt_spc_bits: number of bits needed to record free or dirty space in the LPT
+ * @pcnt_bits: number of bits needed to record pnode or nnode number
+ * @lnum_bits: number of bits needed to record LEB number
+ * @nnode_sz: size of on-flash nnode
+ * @pnode_sz: size of on-flash pnode
+ * @ltab_sz: size of on-flash LPT lprops table
+ * @lsave_sz: size of on-flash LPT save table
+ * @pnode_cnt: number of pnodes
+ * @nnode_cnt: number of nnodes
+ * @lpt_hght: height of the LPT
+ * @pnodes_have: number of pnodes in memory
+ *
+ * @lp_mutex: protects lprops table and all the other lprops-related fields
+ * @lpt_lnum: LEB number of the root nnode of the LPT
+ * @lpt_offs: offset of the root nnode of the LPT
+ * @nhead_lnum: LEB number of LPT head
+ * @nhead_offs: offset of LPT head
+ * @lpt_drty_flgs: dirty flags for LPT special nodes e.g. ltab
+ * @dirty_nn_cnt: number of dirty nnodes
+ * @dirty_pn_cnt: number of dirty pnodes
+ * @check_lpt_free: flag that indicates LPT GC may be needed
+ * @lpt_sz: LPT size
+ * @lpt_nod_buf: buffer for an on-flash nnode or pnode
+ * @lpt_buf: buffer of LEB size used by LPT
+ * @nroot: address in memory of the root nnode of the LPT
+ * @lpt_cnext: next LPT node to commit
+ * @lpt_heap: array of heaps of categorized lprops
+ * @dirty_idx: a (reverse sorted) copy of the LPROPS_DIRTY_IDX heap as at
+ * previous commit start
+ * @uncat_list: list of un-categorized LEBs
+ * @empty_list: list of empty LEBs
+ * @freeable_list: list of freeable non-index LEBs (free + dirty == leb_size)
+ * @frdi_idx_list: list of freeable index LEBs (free + dirty == leb_size)
+ * @freeable_cnt: number of freeable LEBs in @freeable_list
+ *
+ * @ltab_lnum: LEB number of LPT's own lprops table
+ * @ltab_offs: offset of LPT's own lprops table
+ * @ltab: LPT's own lprops table
+ * @ltab_cmt: LPT's own lprops table (commit copy)
+ * @lsave_cnt: number of LEB numbers in LPT's save table
+ * @lsave_lnum: LEB number of LPT's save table
+ * @lsave_offs: offset of LPT's save table
+ * @lsave: LPT's save table
+ * @lscan_lnum: LEB number of last LPT scan
+ *
+ * @rp_size: size of the reserved pool in bytes
+ * @report_rp_size: size of the reserved pool reported to user-space
+ * @rp_uid: reserved pool user ID
+ * @rp_gid: reserved pool group ID
+ *
+ * @empty: if the UBI device is empty
+ * @replay_tree: temporary tree used during journal replay
+ * @replay_list: temporary list used during journal replay
+ * @replay_buds: list of buds to replay
+ * @cs_sqnum: sequence number of first node in the log (commit start node)
+ * @replay_sqnum: sequence number of node currently being replayed
+ * @need_recovery: file-system needs recovery
+ * @replaying: set to %1 during journal replay
+ * @unclean_leb_list: LEBs to recover when mounting ro to rw
+ * @rcvrd_mst_node: recovered master node to write when mounting ro to rw
+ * @size_tree: inode size information for recovery
+ * @remounting_rw: set while remounting from ro to rw (sb flags have MS_RDONLY)
+ * @always_chk_crc: always check CRCs (while mounting and remounting rw)
+ * @mount_opts: UBIFS-specific mount options
+ *
+ * @dbg: debugging-related information
+ */
+struct ubifs_info {
+ struct super_block *vfs_sb;
+
+ ino_t highest_inum;
+ unsigned long long max_sqnum;
+ unsigned long long cmt_no;
+ spinlock_t cnt_lock;
+ int fmt_version;
+ int ro_compat_version;
+ unsigned char uuid[16];
+
+ int lhead_lnum;
+ int lhead_offs;
+ int ltail_lnum;
+ struct mutex log_mutex;
+ int min_log_bytes;
+ long long cmt_bud_bytes;
+
+ struct rb_root buds;
+ long long bud_bytes;
+ spinlock_t buds_lock;
+ int jhead_cnt;
+ struct ubifs_jhead *jheads;
+ long long max_bud_bytes;
+ long long bg_bud_bytes;
+ struct list_head old_buds;
+ int max_bud_cnt;
+
+ struct rw_semaphore commit_sem;
+ int cmt_state;
+ spinlock_t cs_lock;
+ wait_queue_head_t cmt_wq;
+
+ unsigned int big_lpt:1;
+ unsigned int no_chk_data_crc:1;
+ unsigned int bulk_read:1;
+ unsigned int default_compr:2;
+ unsigned int rw_incompat:1;
+
+ struct mutex tnc_mutex;
+ struct ubifs_zbranch zroot;
+ struct ubifs_znode *cnext;
+ struct ubifs_znode *enext;
+ int *gap_lebs;
+ void *cbuf;
+ void *ileb_buf;
+ int ileb_len;
+ int ihead_lnum;
+ int ihead_offs;
+ int *ilebs;
+ int ileb_cnt;
+ int ileb_nxt;
+ struct rb_root old_idx;
+ int *bottom_up_buf;
+
+ struct ubifs_mst_node *mst_node;
+ int mst_offs;
+ struct mutex mst_mutex;
+
+ int max_bu_buf_len;
+ struct mutex bu_mutex;
+ struct bu_info bu;
+
+ int log_lebs;
+ long long log_bytes;
+ int log_last;
+ int lpt_lebs;
+ int lpt_first;
+ int lpt_last;
+ int orph_lebs;
+ int orph_first;
+ int orph_last;
+ int main_lebs;
+ int main_first;
+ long long main_bytes;
+
+ uint8_t key_hash_type;
+ uint32_t (*key_hash)(const char *str, int len);
+ int key_fmt;
+ int key_len;
+ int fanout;
+
+ int min_io_size;
+ int min_io_shift;
+ int leb_size;
+ int half_leb_size;
+ int leb_cnt;
+ int max_leb_cnt;
+ int old_leb_cnt;
+ int ro_media;
+
+ long long budg_idx_growth;
+ long long budg_data_growth;
+ long long budg_dd_growth;
+ long long budg_uncommitted_idx;
+ spinlock_t space_lock;
+ int min_idx_lebs;
+ unsigned long long old_idx_sz;
+ unsigned long long calc_idx_sz;
+ struct ubifs_lp_stats lst;
+ unsigned int nospace:1;
+ unsigned int nospace_rp:1;
+
+ int page_budget;
+ int inode_budget;
+ int dent_budget;
+
+ int ref_node_alsz;
+ int mst_node_alsz;
+ int min_idx_node_sz;
+ int max_idx_node_sz;
+ long long max_inode_sz;
+ int max_znode_sz;
+
+ int leb_overhead;
+ int dead_wm;
+ int dark_wm;
+ int block_cnt;
+
+ struct ubifs_node_range ranges[UBIFS_NODE_TYPES_CNT];
+ struct ubi_volume_desc *ubi;
+ struct ubi_device_info di;
+ struct ubi_volume_info vi;
+
+ struct rb_root orph_tree;
+ struct list_head orph_list;
+ struct list_head orph_new;
+ struct ubifs_orphan *orph_cnext;
+ struct ubifs_orphan *orph_dnext;
+ spinlock_t orphan_lock;
+ void *orph_buf;
+ int new_orphans;
+ int cmt_orphans;
+ int tot_orphans;
+ int max_orphans;
+ int ohead_lnum;
+ int ohead_offs;
+ int no_orphs;
+
+ struct task_struct *bgt;
+ char bgt_name[sizeof(BGT_NAME_PATTERN) + 9];
+ int need_bgt;
+ int need_wbuf_sync;
+
+ int gc_lnum;
+ void *sbuf;
+ struct list_head idx_gc;
+ int idx_gc_cnt;
+ int gc_seq;
+ int gced_lnum;
+
+ struct list_head infos_list;
+ struct mutex umount_mutex;
+ unsigned int shrinker_run_no;
+
+ int space_bits;
+ int lpt_lnum_bits;
+ int lpt_offs_bits;
+ int lpt_spc_bits;
+ int pcnt_bits;
+ int lnum_bits;
+ int nnode_sz;
+ int pnode_sz;
+ int ltab_sz;
+ int lsave_sz;
+ int pnode_cnt;
+ int nnode_cnt;
+ int lpt_hght;
+ int pnodes_have;
+
+ struct mutex lp_mutex;
+ int lpt_lnum;
+ int lpt_offs;
+ int nhead_lnum;
+ int nhead_offs;
+ int lpt_drty_flgs;
+ int dirty_nn_cnt;
+ int dirty_pn_cnt;
+ int check_lpt_free;
+ long long lpt_sz;
+ void *lpt_nod_buf;
+ void *lpt_buf;
+ struct ubifs_nnode *nroot;
+ struct ubifs_cnode *lpt_cnext;
+ struct ubifs_lpt_heap lpt_heap[LPROPS_HEAP_CNT];
+ struct ubifs_lpt_heap dirty_idx;
+ struct list_head uncat_list;
+ struct list_head empty_list;
+ struct list_head freeable_list;
+ struct list_head frdi_idx_list;
+ int freeable_cnt;
+
+ int ltab_lnum;
+ int ltab_offs;
+ struct ubifs_lpt_lprops *ltab;
+ struct ubifs_lpt_lprops *ltab_cmt;
+ int lsave_cnt;
+ int lsave_lnum;
+ int lsave_offs;
+ int *lsave;
+ int lscan_lnum;
+
+ long long rp_size;
+ long long report_rp_size;
+ uid_t rp_uid;
+ gid_t rp_gid;
+
+ /* The below fields are used only during mounting and re-mounting */
+ int empty;
+ struct rb_root replay_tree;
+ struct list_head replay_list;
+ struct list_head replay_buds;
+ unsigned long long cs_sqnum;
+ unsigned long long replay_sqnum;
+ int need_recovery;
+ int replaying;
+ struct list_head unclean_leb_list;
+ struct ubifs_mst_node *rcvrd_mst_node;
+ struct rb_root size_tree;
+ int remounting_rw;
+ int always_chk_crc;
+ struct ubifs_mount_opts mount_opts;
+
+#ifdef CONFIG_UBIFS_FS_DEBUG
+ struct ubifs_debug_info *dbg;
+#endif
+};
+
+extern spinlock_t ubifs_infos_lock;
+extern struct kmem_cache *ubifs_inode_slab;
+extern const struct super_operations ubifs_super_operations;
+extern const struct address_space_operations ubifs_file_address_operations;
+extern const struct file_operations ubifs_file_operations;
+extern const struct inode_operations ubifs_file_inode_operations;
+extern const struct file_operations ubifs_dir_operations;
+extern const struct inode_operations ubifs_dir_inode_operations;
+extern const struct inode_operations ubifs_symlink_inode_operations;
+extern struct backing_dev_info ubifs_backing_dev_info;
+extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
+
+/* io.c */
+void ubifs_ro_mode(struct ubifs_info *c, int err);
+int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len);
+int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
+ int dtype);
+int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf);
+int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
+ int lnum, int offs);
+int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
+ int lnum, int offs);
+int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
+ int offs, int dtype);
+int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ int offs, int quiet, int must_chk_crc);
+void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad);
+void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last);
+int ubifs_io_init(struct ubifs_info *c);
+void ubifs_pad(const struct ubifs_info *c, void *buf, int pad);
+int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf);
+int ubifs_bg_wbufs_sync(struct ubifs_info *c);
+void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum);
+int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode);
+
+/* scan.c */
+struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
+ int offs, void *sbuf);
+void ubifs_scan_destroy(struct ubifs_scan_leb *sleb);
+int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
+ int offs, int quiet);
+struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
+ int offs, void *sbuf);
+void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ int lnum, int offs);
+int ubifs_add_snod(const struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ void *buf, int offs);
+void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
+ void *buf);
+
+/* log.c */
+void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud);
+void ubifs_create_buds_lists(struct ubifs_info *c);
+int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs);
+struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum);
+struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum);
+int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum);
+int ubifs_log_end_commit(struct ubifs_info *c, int new_ltail_lnum);
+int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum);
+int ubifs_consolidate_log(struct ubifs_info *c);
+
+/* journal.c */
+int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
+ const struct qstr *nm, const struct inode *inode,
+ int deletion, int xent);
+int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
+ const union ubifs_key *key, const void *buf, int len);
+int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode);
+int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode);
+int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
+ const struct dentry *old_dentry,
+ const struct inode *new_dir,
+ const struct dentry *new_dentry, int sync);
+int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
+ loff_t old_size, loff_t new_size);
+int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
+ const struct inode *inode, const struct qstr *nm);
+int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode1,
+ const struct inode *inode2);
+
+/* budget.c */
+int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req);
+void ubifs_release_budget(struct ubifs_info *c, struct ubifs_budget_req *req);
+void ubifs_release_dirty_inode_budget(struct ubifs_info *c,
+ struct ubifs_inode *ui);
+int ubifs_budget_inode_op(struct ubifs_info *c, struct inode *inode,
+ struct ubifs_budget_req *req);
+void ubifs_release_ino_dirty(struct ubifs_info *c, struct inode *inode,
+ struct ubifs_budget_req *req);
+void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode,
+ struct ubifs_budget_req *req);
+long long ubifs_get_free_space(struct ubifs_info *c);
+long long ubifs_get_free_space_nolock(struct ubifs_info *c);
+int ubifs_calc_min_idx_lebs(struct ubifs_info *c);
+void ubifs_convert_page_budget(struct ubifs_info *c);
+long long ubifs_reported_space(const struct ubifs_info *c, long long free);
+long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs);
+
+/* find.c */
+int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *free,
+ int squeeze);
+int ubifs_find_free_leb_for_idx(struct ubifs_info *c);
+int ubifs_find_dirty_leb(struct ubifs_info *c, struct ubifs_lprops *ret_lp,
+ int min_space, int pick_free);
+int ubifs_find_dirty_idx_leb(struct ubifs_info *c);
+int ubifs_save_dirty_idx_lnums(struct ubifs_info *c);
+
+/* tnc.c */
+int ubifs_lookup_level0(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_znode **zn, int *n);
+int ubifs_tnc_lookup_nm(struct ubifs_info *c, const union ubifs_key *key,
+ void *node, const struct qstr *nm);
+int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
+ void *node, int *lnum, int *offs);
+int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
+ int offs, int len);
+int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
+ int old_lnum, int old_offs, int lnum, int offs, int len);
+int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
+ int lnum, int offs, int len, const struct qstr *nm);
+int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key);
+int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
+ const struct qstr *nm);
+int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key,
+ union ubifs_key *to_key);
+int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum);
+struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
+ union ubifs_key *key,
+ const struct qstr *nm);
+void ubifs_tnc_close(struct ubifs_info *c);
+int ubifs_tnc_has_node(struct ubifs_info *c, union ubifs_key *key, int level,
+ int lnum, int offs, int is_idx);
+int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
+ int lnum, int offs);
+/* Shared by tnc.c for tnc_commit.c */
+void destroy_old_idx(struct ubifs_info *c);
+int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
+ int lnum, int offs);
+int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode);
+int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu);
+int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu);
+
+/* tnc_misc.c */
+struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,
+ struct ubifs_znode *znode);
+int ubifs_search_zbranch(const struct ubifs_info *c,
+ const struct ubifs_znode *znode,
+ const union ubifs_key *key, int *n);
+struct ubifs_znode *ubifs_tnc_postorder_first(struct ubifs_znode *znode);
+struct ubifs_znode *ubifs_tnc_postorder_next(struct ubifs_znode *znode);
+long ubifs_destroy_tnc_subtree(struct ubifs_znode *zr);
+struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
+ struct ubifs_zbranch *zbr,
+ struct ubifs_znode *parent, int iip);
+int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ void *node);
+
+/* tnc_commit.c */
+int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot);
+int ubifs_tnc_end_commit(struct ubifs_info *c);
+
+/* shrinker.c */
+int ubifs_shrinker(int nr_to_scan, gfp_t gfp_mask);
+
+/* commit.c */
+int ubifs_bg_thread(void *info);
+void ubifs_commit_required(struct ubifs_info *c);
+void ubifs_request_bg_commit(struct ubifs_info *c);
+int ubifs_run_commit(struct ubifs_info *c);
+void ubifs_recovery_commit(struct ubifs_info *c);
+int ubifs_gc_should_commit(struct ubifs_info *c);
+void ubifs_wait_for_commit(struct ubifs_info *c);
+
+/* master.c */
+int ubifs_read_master(struct ubifs_info *c);
+int ubifs_write_master(struct ubifs_info *c);
+
+/* sb.c */
+int ubifs_read_superblock(struct ubifs_info *c);
+struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c);
+int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup);
+
+/* replay.c */
+int ubifs_validate_entry(struct ubifs_info *c,
+ const struct ubifs_dent_node *dent);
+int ubifs_replay_journal(struct ubifs_info *c);
+
+/* gc.c */
+int ubifs_garbage_collect(struct ubifs_info *c, int anyway);
+int ubifs_gc_start_commit(struct ubifs_info *c);
+int ubifs_gc_end_commit(struct ubifs_info *c);
+void ubifs_destroy_idx_gc(struct ubifs_info *c);
+int ubifs_get_idx_gc_leb(struct ubifs_info *c);
+int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp);
+
+/* orphan.c */
+int ubifs_add_orphan(struct ubifs_info *c, ino_t inum);
+void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum);
+int ubifs_orphan_start_commit(struct ubifs_info *c);
+int ubifs_orphan_end_commit(struct ubifs_info *c);
+int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only);
+int ubifs_clear_orphans(struct ubifs_info *c);
+
+/* lpt.c */
+int ubifs_calc_lpt_geom(struct ubifs_info *c);
+int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
+ int *lpt_lebs, int *big_lpt);
+int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr);
+struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum);
+struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum);
+int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum,
+ ubifs_lpt_scan_callback scan_cb, void *data);
+
+/* Shared by lpt.c for lpt_commit.c */
+void ubifs_pack_lsave(struct ubifs_info *c, void *buf, int *lsave);
+void ubifs_pack_ltab(struct ubifs_info *c, void *buf,
+ struct ubifs_lpt_lprops *ltab);
+void ubifs_pack_pnode(struct ubifs_info *c, void *buf,
+ struct ubifs_pnode *pnode);
+void ubifs_pack_nnode(struct ubifs_info *c, void *buf,
+ struct ubifs_nnode *nnode);
+struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c,
+ struct ubifs_nnode *parent, int iip);
+struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c,
+ struct ubifs_nnode *parent, int iip);
+int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip);
+void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty);
+void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode);
+uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits);
+struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght);
+/* Needed only in debugging code in lpt_commit.c */
+int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
+ struct ubifs_nnode *nnode);
+
+/* lpt_commit.c */
+int ubifs_lpt_start_commit(struct ubifs_info *c);
+int ubifs_lpt_end_commit(struct ubifs_info *c);
+int ubifs_lpt_post_commit(struct ubifs_info *c);
+void ubifs_lpt_free(struct ubifs_info *c, int wr_only);
+
+/* lprops.c */
+const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
+ const struct ubifs_lprops *lp,
+ int free, int dirty, int flags,
+ int idx_gc_cnt);
+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst);
+void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
+ int cat);
+void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
+ struct ubifs_lprops *new_lprops);
+void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops);
+int ubifs_categorize_lprops(const struct ubifs_info *c,
+ const struct ubifs_lprops *lprops);
+int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
+ int flags_set, int flags_clean, int idx_gc_cnt);
+int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
+ int flags_set, int flags_clean);
+int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp);
+const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c);
+const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c);
+const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c);
+const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c);
+
+/* file.c */
+int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync);
+int ubifs_setattr(struct dentry *dentry, struct iattr *attr);
+
+/* dir.c */
+struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
+ int mode);
+int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
+
+/* xattr.c */
+int ubifs_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
+ size_t size);
+ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+int ubifs_removexattr(struct dentry *dentry, const char *name);
+
+/* super.c */
+struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
+int ubifs_iput(struct inode *inode);
+
+/* recovery.c */
+int ubifs_recover_master_node(struct ubifs_info *c);
+int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
+struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
+ int offs, void *sbuf, int grouped);
+struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
+ int offs, void *sbuf);
+int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);
+int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf);
+int ubifs_rcvry_gc_commit(struct ubifs_info *c);
+int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
+ int deletion, loff_t new_size);
+int ubifs_recover_size(struct ubifs_info *c);
+void ubifs_destroy_size_tree(struct ubifs_info *c);
+
+/* ioctl.c */
+long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+void ubifs_set_inode_flags(struct inode *inode);
+#ifdef CONFIG_COMPAT
+long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+/* compressor.c */
+int __init ubifs_compressors_init(void);
+void __exit ubifs_compressors_exit(void);
+void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len,
+ int *compr_type);
+int ubifs_decompress(const void *buf, int len, void *out, int *out_len,
+ int compr_type);
+
+/* these are used in cmd_ubifs.c */
+int ubifs_init(void);
+int ubifs_mount(char *vol_name);
+void ubifs_umount(struct ubifs_info *c);
+int ubifs_ls(char *dir_name);
+int ubifs_load(char *filename, u32 addr, u32 size);
+
+#include "debug.h"
+#include "misc.h"
+#include "key.h"
+
+/* todo: Move these to a common U-Boot header */
+int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len);
+#endif /* !__UBIFS_H__ */
diff --git a/qemu/roms/u-boot/fs/yaffs2/Makefile b/qemu/roms/u-boot/fs/yaffs2/Makefile
new file mode 100644
index 000000000..45ff7458c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/Makefile
@@ -0,0 +1,29 @@
+# Makefile for YAFFS direct test
+#
+#
+# YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+#
+# Copyright (C) 2003 Aleph One Ltd.
+#
+#
+# Created by Charles Manning <charles@aleph1.co.uk>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# NB Warning this Makefile does not include header dependencies.
+#
+# $Id: Makefile,v 1.15 2007/07/18 19:40:38 charles Exp $
+
+obj-y := \
+ yaffs_allocator.o yaffs_attribs.o yaffs_bitmap.o yaffs_uboot_glue.o\
+ yaffs_checkptrw.o yaffs_ecc.o yaffs_error.o \
+ yaffsfs.o yaffs_guts.o yaffs_nameval.o yaffs_nand.o\
+ yaffs_packedtags1.o yaffs_packedtags2.o yaffs_qsort.o \
+ yaffs_summary.o yaffs_tagscompat.o yaffs_verify.o yaffs_yaffs1.o \
+ yaffs_yaffs2.o yaffs_mtdif.o yaffs_mtdif2.o
+
+ccflags-y = -DCONFIG_YAFFS_DIRECT -DCONFIG_YAFFS_SHORT_NAMES_IN_RAM \
+ -DCONFIG_YAFFS_YAFFS2 -DNO_Y_INLINE \
+ -DCONFIG_YAFFS_PROVIDE_DEFS -DCONFIG_YAFFSFS_PROVIDE_VALUES
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.c
new file mode 100644
index 000000000..611061fb4
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.c
@@ -0,0 +1,356 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_allocator.h"
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yportenv.h"
+
+/*
+ * Each entry in yaffs_tnode_list and yaffs_obj_list holds blocks
+ * of approx 100 objects that are then allocated singly.
+ * This is basically a simplified slab allocator.
+ *
+ * We don't use the Linux slab allocator because slab does not allow
+ * us to dump all the objects in one hit when we do a umount and tear
+ * down all the tnodes and objects. slab requires that we first free
+ * the individual objects.
+ *
+ * Once yaffs has been mainlined I shall try to motivate for a change
+ * to slab to provide the extra features we need here.
+ */
+
+struct yaffs_tnode_list {
+ struct yaffs_tnode_list *next;
+ struct yaffs_tnode *tnodes;
+};
+
+struct yaffs_obj_list {
+ struct yaffs_obj_list *next;
+ struct yaffs_obj *objects;
+};
+
+struct yaffs_allocator {
+ int n_tnodes_created;
+ struct yaffs_tnode *free_tnodes;
+ int n_free_tnodes;
+ struct yaffs_tnode_list *alloc_tnode_list;
+
+ int n_obj_created;
+ struct list_head free_objs;
+ int n_free_objects;
+
+ struct yaffs_obj_list *allocated_obj_list;
+};
+
+static void yaffs_deinit_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->alloc_tnode_list) {
+ tmp = allocator->alloc_tnode_list->next;
+
+ kfree(allocator->alloc_tnode_list->tnodes);
+ kfree(allocator->alloc_tnode_list);
+ allocator->alloc_tnode_list = tmp;
+ }
+
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static void yaffs_init_raw_tnodes(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->alloc_tnode_list = NULL;
+ allocator->free_tnodes = NULL;
+ allocator->n_free_tnodes = 0;
+ allocator->n_tnodes_created = 0;
+}
+
+static int yaffs_create_tnodes(struct yaffs_dev *dev, int n_tnodes)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ int i;
+ struct yaffs_tnode *new_tnodes;
+ u8 *mem;
+ struct yaffs_tnode *curr;
+ struct yaffs_tnode *next;
+ struct yaffs_tnode_list *tnl;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_tnodes < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_tnodes = kmalloc(n_tnodes * dev->tnode_size, GFP_NOFS);
+ mem = (u8 *) new_tnodes;
+
+ if (!new_tnodes) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: Could not allocate Tnodes");
+ return YAFFS_FAIL;
+ }
+
+ /* New hookup for wide tnodes */
+ for (i = 0; i < n_tnodes - 1; i++) {
+ curr = (struct yaffs_tnode *)&mem[i * dev->tnode_size];
+ next = (struct yaffs_tnode *)&mem[(i + 1) * dev->tnode_size];
+ curr->internal[0] = next;
+ }
+
+ curr = (struct yaffs_tnode *)&mem[(n_tnodes - 1) * dev->tnode_size];
+ curr->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = (struct yaffs_tnode *)mem;
+
+ allocator->n_free_tnodes += n_tnodes;
+ allocator->n_tnodes_created += n_tnodes;
+
+ /* Now add this bunch of tnodes to a list for freeing up.
+ * NB If we can't add this to the management list it isn't fatal
+ * but it just means we can't free this bunch of tnodes later.
+ */
+ tnl = kmalloc(sizeof(struct yaffs_tnode_list), GFP_NOFS);
+ if (!tnl) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Could not add tnodes to management list");
+ return YAFFS_FAIL;
+ } else {
+ tnl->tnodes = new_tnodes;
+ tnl->next = allocator->alloc_tnode_list;
+ allocator->alloc_tnode_list = tnl;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Tnodes added");
+
+ return YAFFS_OK;
+}
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator =
+ (struct yaffs_allocator *)dev->allocator;
+ struct yaffs_tnode *tn = NULL;
+
+ if (!allocator) {
+ BUG();
+ return NULL;
+ }
+
+ /* If there are none left make more */
+ if (!allocator->free_tnodes)
+ yaffs_create_tnodes(dev, YAFFS_ALLOCATION_NTNODES);
+
+ if (allocator->free_tnodes) {
+ tn = allocator->free_tnodes;
+ allocator->free_tnodes = allocator->free_tnodes->internal[0];
+ allocator->n_free_tnodes--;
+ }
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ if (tn) {
+ tn->internal[0] = allocator->free_tnodes;
+ allocator->free_tnodes = tn;
+ allocator->n_free_tnodes++;
+ }
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+/*--------------- yaffs_obj allocation ------------------------
+ *
+ * Free yaffs_objs are stored in a list using obj->siblings.
+ * The blocks of allocated objects are stored in a linked list.
+ */
+
+static void yaffs_init_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ allocator->allocated_obj_list = NULL;
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+}
+
+static void yaffs_deinit_raw_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ struct yaffs_obj_list *tmp;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ while (allocator->allocated_obj_list) {
+ tmp = allocator->allocated_obj_list->next;
+ kfree(allocator->allocated_obj_list->objects);
+ kfree(allocator->allocated_obj_list);
+ allocator->allocated_obj_list = tmp;
+ }
+
+ INIT_LIST_HEAD(&allocator->free_objs);
+ allocator->n_free_objects = 0;
+ allocator->n_obj_created = 0;
+}
+
+static int yaffs_create_free_objs(struct yaffs_dev *dev, int n_obj)
+{
+ struct yaffs_allocator *allocator = dev->allocator;
+ int i;
+ struct yaffs_obj *new_objs;
+ struct yaffs_obj_list *list;
+
+ if (!allocator) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (n_obj < 1)
+ return YAFFS_OK;
+
+ /* make these things */
+ new_objs = kmalloc(n_obj * sizeof(struct yaffs_obj), GFP_NOFS);
+ list = kmalloc(sizeof(struct yaffs_obj_list), GFP_NOFS);
+
+ if (!new_objs || !list) {
+ kfree(new_objs);
+ new_objs = NULL;
+ kfree(list);
+ list = NULL;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Could not allocate more objects");
+ return YAFFS_FAIL;
+ }
+
+ /* Hook them into the free list */
+ for (i = 0; i < n_obj; i++)
+ list_add(&new_objs[i].siblings, &allocator->free_objs);
+
+ allocator->n_free_objects += n_obj;
+ allocator->n_obj_created += n_obj;
+
+ /* Now add this bunch of Objects to a list for freeing up. */
+
+ list->objects = new_objs;
+ list->next = allocator->allocated_obj_list;
+ allocator->allocated_obj_list = list;
+
+ return YAFFS_OK;
+}
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = NULL;
+ struct list_head *lh;
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return obj;
+ }
+
+ /* If there are none left make more */
+ if (list_empty(&allocator->free_objs))
+ yaffs_create_free_objs(dev, YAFFS_ALLOCATION_NOBJECTS);
+
+ if (!list_empty(&allocator->free_objs)) {
+ lh = allocator->free_objs.next;
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ list_del_init(lh);
+ allocator->n_free_objects--;
+ }
+
+ return obj;
+}
+
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+
+ struct yaffs_allocator *allocator = dev->allocator;
+
+ if (!allocator) {
+ BUG();
+ return;
+ }
+
+ /* Link into the free list. */
+ list_add(&obj->siblings, &allocator->free_objs);
+ allocator->n_free_objects++;
+}
+
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+
+ if (!dev->allocator) {
+ BUG();
+ return;
+ }
+
+ yaffs_deinit_raw_tnodes(dev);
+ yaffs_deinit_raw_objs(dev);
+ kfree(dev->allocator);
+ dev->allocator = NULL;
+}
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_allocator *allocator;
+
+ if (dev->allocator) {
+ BUG();
+ return;
+ }
+
+ allocator = kmalloc(sizeof(struct yaffs_allocator), GFP_NOFS);
+ if (allocator) {
+ dev->allocator = allocator;
+ yaffs_init_raw_tnodes(dev);
+ yaffs_init_raw_objs(dev);
+ }
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.h
new file mode 100644
index 000000000..a8cc32264
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_allocator.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ALLOCATOR_H__
+#define __YAFFS_ALLOCATOR_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_init_raw_tnodes_and_objs(struct yaffs_dev *dev);
+void yaffs_deinit_raw_tnodes_and_objs(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_alloc_raw_tnode(struct yaffs_dev *dev);
+void yaffs_free_raw_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn);
+
+struct yaffs_obj *yaffs_alloc_raw_obj(struct yaffs_dev *dev);
+void yaffs_free_raw_obj(struct yaffs_dev *dev, struct yaffs_obj *obj);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.c
new file mode 100644
index 000000000..69664268e
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.c
@@ -0,0 +1,152 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_attribs.h"
+
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh)
+{
+ obj->yst_uid = oh->yst_uid;
+ obj->yst_gid = oh->yst_gid;
+ obj->yst_atime = oh->yst_atime;
+ obj->yst_mtime = oh->yst_mtime;
+ obj->yst_ctime = oh->yst_ctime;
+ obj->yst_rdev = oh->yst_rdev;
+}
+
+
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj)
+{
+#ifdef CONFIG_YAFFS_WINCE
+ oh->win_atime[0] = obj->win_atime[0];
+ oh->win_ctime[0] = obj->win_ctime[0];
+ oh->win_mtime[0] = obj->win_mtime[0];
+ oh->win_atime[1] = obj->win_atime[1];
+ oh->win_ctime[1] = obj->win_ctime[1];
+ oh->win_mtime[1] = obj->win_mtime[1];
+#else
+ oh->yst_uid = obj->yst_uid;
+ oh->yst_gid = obj->yst_gid;
+ oh->yst_atime = obj->yst_atime;
+ oh->yst_mtime = obj->yst_mtime;
+ oh->yst_ctime = obj->yst_ctime;
+ oh->yst_rdev = obj->yst_rdev;
+#endif
+
+}
+
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev)
+{
+
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_win_file_time_now(obj->win_atime);
+ obj->win_ctime[0] = obj->win_mtime[0] = obj->win_atime[0];
+ obj->win_ctime[1] = obj->win_mtime[1] = obj->win_atime[1];
+
+#else
+ yaffs_load_current_time(obj, 1, 1);
+ obj->yst_rdev = rdev;
+ obj->yst_uid = uid;
+ obj->yst_gid = gid;
+#endif
+}
+
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c)
+{
+#ifdef CONFIG_YAFFS_WINCE
+ yfsd_win_file_time_now(obj->win_atime);
+ obj->win_ctime[0] = obj->win_mtime[0] =
+ obj->win_atime[0];
+ obj->win_ctime[1] = obj->win_mtime[1] =
+ obj->win_atime[1];
+
+#else
+
+ obj->yst_mtime = Y_CURRENT_TIME;
+ if (do_a)
+ obj->yst_atime = obj->yst_mtime;
+ if (do_c)
+ obj->yst_ctime = obj->yst_mtime;
+#endif
+}
+
+loff_t yaffs_get_file_size(struct yaffs_obj *obj)
+{
+ YCHAR *alias = NULL;
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return obj->variant.file_variant.file_size;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = obj->variant.symlink_variant.alias;
+ if (!alias)
+ return 0;
+ return yaffs_strnlen(alias, YAFFS_MAX_ALIAS_LENGTH);
+ default:
+ return 0;
+ }
+}
+
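+/*
+ * yaffs_set_attribs()/yaffs_get_attribs() translate between a struct iattr
+ * and the yst_* fields of the object.  Only the fields flagged in ia_valid
+ * (ATTR_MODE, ATTR_UID, ...) are applied, and a size change goes through
+ * yaffs_resize_file().
+ */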
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = attr->ia_valid;
+
+ if (valid & ATTR_MODE)
+ obj->yst_mode = attr->ia_mode;
+ if (valid & ATTR_UID)
+ obj->yst_uid = attr->ia_uid;
+ if (valid & ATTR_GID)
+ obj->yst_gid = attr->ia_gid;
+
+ if (valid & ATTR_ATIME)
+ obj->yst_atime = Y_TIME_CONVERT(attr->ia_atime);
+ if (valid & ATTR_CTIME)
+ obj->yst_ctime = Y_TIME_CONVERT(attr->ia_ctime);
+ if (valid & ATTR_MTIME)
+ obj->yst_mtime = Y_TIME_CONVERT(attr->ia_mtime);
+
+ if (valid & ATTR_SIZE)
+ yaffs_resize_file(obj, attr->ia_size);
+
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ return YAFFS_OK;
+
+}
+
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr)
+{
+ unsigned int valid = 0;
+
+ attr->ia_mode = obj->yst_mode;
+ valid |= ATTR_MODE;
+ attr->ia_uid = obj->yst_uid;
+ valid |= ATTR_UID;
+ attr->ia_gid = obj->yst_gid;
+ valid |= ATTR_GID;
+
+ Y_TIME_CONVERT(attr->ia_atime) = obj->yst_atime;
+ valid |= ATTR_ATIME;
+ Y_TIME_CONVERT(attr->ia_ctime) = obj->yst_ctime;
+ valid |= ATTR_CTIME;
+ Y_TIME_CONVERT(attr->ia_mtime) = obj->yst_mtime;
+ valid |= ATTR_MTIME;
+
+ attr->ia_size = yaffs_get_file_size(obj);
+ valid |= ATTR_SIZE;
+
+ attr->ia_valid = valid;
+
+ return YAFFS_OK;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.h
new file mode 100644
index 000000000..5b21b085b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_attribs.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_ATTRIBS_H__
+#define __YAFFS_ATTRIBS_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_load_attribs(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh);
+void yaffs_load_attribs_oh(struct yaffs_obj_hdr *oh, struct yaffs_obj *obj);
+void yaffs_attribs_init(struct yaffs_obj *obj, u32 gid, u32 uid, u32 rdev);
+void yaffs_load_current_time(struct yaffs_obj *obj, int do_a, int do_c);
+int yaffs_set_attribs(struct yaffs_obj *obj, struct iattr *attr);
+int yaffs_get_attribs(struct yaffs_obj *obj, struct iattr *attr);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.c
new file mode 100644
index 000000000..4440e930d
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.c
@@ -0,0 +1,97 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_bitmap.h"
+#include "yaffs_trace.h"
+/*
+ * Chunk bitmap manipulations
+ */
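+/*
+ * One bit is kept per chunk, packed eight to a byte, with each block owning
+ * chunk_bit_stride bytes of the map.  For (blk, chunk) the bit sits at
+ * blk_bits[chunk / 8], mask (1 << (chunk & 7)).
+ */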
+
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "BlockBits block %d is not valid",
+ blk);
+ BUG();
+ }
+ return dev->chunk_bits +
+ (dev->chunk_bit_stride * (blk - dev->internal_start_block));
+}
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
+ chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Chunk Id (%d:%d) invalid",
+ blk, chunk);
+ BUG();
+ }
+}
+
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ memset(blk_bits, 0, dev->chunk_bit_stride);
+}
+
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
+}
+
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ blk_bits[chunk / 8] |= (1 << (chunk & 7));
+}
+
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+
+ yaffs_verify_chunk_bit_id(dev, blk, chunk);
+ return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
+}
+
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++) {
+ if (*blk_bits)
+ return 1;
+ blk_bits++;
+ }
+ return 0;
+}
+
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+{
+ u8 *blk_bits = yaffs_block_bits(dev, blk);
+ int i;
+ int n = 0;
+
+ for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
+ n += hweight8(*blk_bits);
+
+ return n;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.h
new file mode 100644
index 000000000..e26b37d89
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_bitmap.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Chunk bitmap manipulations
+ */
+
+#ifndef __YAFFS_BITMAP_H__
+#define __YAFFS_BITMAP_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.c
new file mode 100644
index 000000000..997a618ae
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.c
@@ -0,0 +1,408 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_checkptrw.h"
+#include "yaffs_getblockinfo.h"
+
+static int yaffs2_checkpt_space_ok(struct yaffs_dev *dev)
+{
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpt blocks_avail = %d", blocks_avail);
+
+ return (blocks_avail <= 0) ? 0 : 1;
+}
+
+static int yaffs_checkpt_erase(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (!dev->param.erase_fn)
+ return 0;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checking blocks %d to %d",
+ dev->internal_start_block, dev->internal_end_block);
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "erasing checkpt block %d", i);
+
+ dev->n_erasures++;
+
+ if (dev->param.
+ erase_fn(dev,
+ i - dev->block_offset /* realign */)) {
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ dev->n_free_chunks +=
+ dev->param.chunks_per_block;
+ } else {
+ dev->param.bad_block_fn(dev, i);
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ }
+ }
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ return 1;
+}
+
+static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
+{
+ int i;
+ int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block: erased %d reserved %d avail %d next %d ",
+ dev->n_erased_blocks, dev->param.n_reserved_blocks,
+ blocks_avail, dev->checkpt_next_block);
+
+ if (dev->checkpt_next_block >= 0 &&
+ dev->checkpt_next_block <= dev->internal_end_block &&
+ blocks_avail > 0) {
+
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, i);
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ dev->checkpt_next_block = i + 1;
+ dev->checkpt_cur_block = i;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "allocating checkpt block %d", i);
+ return;
+ }
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_ext_tags tags;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: start: blocks %d next %d",
+ dev->blocks_in_checkpt, dev->checkpt_next_block);
+
+ if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
+ for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
+ i++) {
+ int chunk = i * dev->param.chunks_per_block;
+ int realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->param.read_chunk_tags_fn(dev, realigned_chunk,
+ NULL, &tags);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "find next checkpt block: search: block %d oid %d seq %d eccr %d",
+ i, tags.obj_id, tags.seq_number,
+ tags.ecc_result);
+
+ if (tags.seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ /* Right kind of block */
+ dev->checkpt_next_block = tags.obj_id;
+ dev->checkpt_cur_block = i;
+ dev->checkpt_block_list[dev->
+ blocks_in_checkpt] = i;
+ dev->blocks_in_checkpt++;
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "found checkpt block %d", i);
+ return;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
+
+ dev->checkpt_next_block = -1;
+ dev->checkpt_cur_block = -1;
+}
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
+{
+ int i;
+
+ dev->checkpt_open_write = writing;
+
+ /* Got the functions we need? */
+ if (!dev->param.write_chunk_tags_fn ||
+ !dev->param.read_chunk_tags_fn ||
+ !dev->param.erase_fn || !dev->param.bad_block_fn)
+ return 0;
+
+ if (writing && !yaffs2_checkpt_space_ok(dev))
+ return 0;
+
+ if (!dev->checkpt_buffer)
+ dev->checkpt_buffer =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ dev->checkpt_page_seq = 0;
+ dev->checkpt_byte_count = 0;
+ dev->checkpt_sum = 0;
+ dev->checkpt_xor = 0;
+ dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_chunk = -1;
+ dev->checkpt_next_block = dev->internal_start_block;
+
+ /* Erase all the blocks in the checkpoint area */
+ if (writing) {
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+ dev->checkpt_byte_offs = 0;
+ return yaffs_checkpt_erase(dev);
+ }
+
+ /* Set to a value that will kick off a read */
+ dev->checkpt_byte_offs = dev->data_bytes_per_chunk;
+ /* A checkpoint block list of 1 checkpoint block per 16 blocks is
+ * (hopefully) going to be way more than we need */
+ dev->blocks_in_checkpt = 0;
+ dev->checkpt_max_blocks =
+ (dev->internal_end_block - dev->internal_start_block) / 16 + 2;
+ dev->checkpt_block_list =
+ kmalloc(sizeof(int) * dev->checkpt_max_blocks, GFP_NOFS);
+
+ if (!dev->checkpt_block_list)
+ return 0;
+
+ for (i = 0; i < dev->checkpt_max_blocks; i++)
+ dev->checkpt_block_list[i] = -1;
+
+ return 1;
+}
+
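+/*
+ * The running checkpoint checksum is a simple byte sum plus a byte XOR,
+ * packed into one word as (sum << 8) | (xor & 0xff) so callers can store and
+ * compare a single value.
+ */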
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum)
+{
+ u32 composite_sum;
+
+ composite_sum = (dev->checkpt_sum << 8) | (dev->checkpt_xor & 0xff);
+ *sum = composite_sum;
+ return 1;
+}
+
+static int yaffs2_checkpt_flush_buffer(struct yaffs_dev *dev)
+{
+ int chunk;
+ int realigned_chunk;
+ struct yaffs_ext_tags tags;
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_erased_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0)
+ return 0;
+
+ tags.is_deleted = 0;
+ tags.obj_id = dev->checkpt_next_block; /* Hint to next place to look */
+ tags.chunk_id = dev->checkpt_page_seq + 1;
+ tags.seq_number = YAFFS_SEQUENCE_CHECKPOINT_DATA;
+ tags.n_bytes = dev->data_bytes_per_chunk;
+ if (dev->checkpt_cur_chunk == 0) {
+ /* First chunk we write for the block? Set block state to
+ checkpoint */
+ struct yaffs_block_info *bi =
+ yaffs_get_block_info(dev, dev->checkpt_cur_block);
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ dev->blocks_in_checkpt++;
+ }
+
+ chunk =
+ dev->checkpt_cur_block * dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint wite buffer nand %d(%d:%d) objid %d chId %d",
+ chunk, dev->checkpt_cur_block, dev->checkpt_cur_chunk,
+ tags.obj_id, tags.chunk_id);
+
+ realigned_chunk = chunk - dev->chunk_offset;
+
+ dev->n_page_writes++;
+
+ dev->param.write_chunk_tags_fn(dev, realigned_chunk,
+ dev->checkpt_buffer, &tags);
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+ if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
+ dev->checkpt_cur_chunk = 0;
+ dev->checkpt_cur_block = -1;
+ }
+ memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
+
+ return 1;
+}
+
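+/*
+ * yaffs2_checkpt_wr()/yaffs2_checkpt_rd() stream bytes through the single
+ * chunk-sized checkpt_buffer: writes flush it to NAND whenever
+ * checkpt_byte_offs reaches data_bytes_per_chunk, and reads pull in the next
+ * checkpoint chunk once the buffer is used up.  Both return the number of
+ * bytes transferred, 0 if no buffer is set up, or -1 if the stream was opened
+ * in the other direction.
+ */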
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (!dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+ dev->checkpt_buffer[dev->checkpt_byte_offs] = *data_bytes;
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ ok = yaffs2_checkpt_flush_buffer(dev);
+ }
+
+ return i;
+}
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes)
+{
+ int i = 0;
+ int ok = 1;
+ struct yaffs_ext_tags tags;
+ int chunk;
+ int realigned_chunk;
+ u8 *data_bytes = (u8 *) data;
+
+ if (!dev->checkpt_buffer)
+ return 0;
+
+ if (dev->checkpt_open_write)
+ return -1;
+
+ while (i < n_bytes && ok) {
+
+ if (dev->checkpt_byte_offs < 0 ||
+ dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+
+ if (dev->checkpt_cur_block < 0) {
+ yaffs2_checkpt_find_block(dev);
+ dev->checkpt_cur_chunk = 0;
+ }
+
+ if (dev->checkpt_cur_block < 0) {
+ ok = 0;
+ break;
+ }
+
+ chunk = dev->checkpt_cur_block *
+ dev->param.chunks_per_block +
+ dev->checkpt_cur_chunk;
+
+ realigned_chunk = chunk - dev->chunk_offset;
+ dev->n_page_reads++;
+
+ /* read in the next chunk */
+ dev->param.read_chunk_tags_fn(dev,
+ realigned_chunk,
+ dev->checkpt_buffer,
+ &tags);
+
+ if (tags.chunk_id != (dev->checkpt_page_seq + 1) ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.seq_number != YAFFS_SEQUENCE_CHECKPOINT_DATA) {
+ ok = 0;
+ break;
+ }
+
+ dev->checkpt_byte_offs = 0;
+ dev->checkpt_page_seq++;
+ dev->checkpt_cur_chunk++;
+
+ if (dev->checkpt_cur_chunk >=
+ dev->param.chunks_per_block)
+ dev->checkpt_cur_block = -1;
+ }
+
+ *data_bytes = dev->checkpt_buffer[dev->checkpt_byte_offs];
+ dev->checkpt_sum += *data_bytes;
+ dev->checkpt_xor ^= *data_bytes;
+ dev->checkpt_byte_offs++;
+ i++;
+ data_bytes++;
+ dev->checkpt_byte_count++;
+ }
+
+ return i;
+}
+
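+/*
+ * Closing the checkpoint flushes any partly filled write buffer (or, after a
+ * read, re-marks the blocks found as checkpoint blocks), then deducts the
+ * blocks and chunks the checkpoint occupies from the free-space counters.
+ */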
+int yaffs_checkpt_close(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->checkpt_open_write) {
+ if (dev->checkpt_byte_offs != 0)
+ yaffs2_checkpt_flush_buffer(dev);
+ } else if (dev->checkpt_block_list) {
+ for (i = 0;
+ i < dev->blocks_in_checkpt &&
+ dev->checkpt_block_list[i] >= 0; i++) {
+ int blk = dev->checkpt_block_list[i];
+ struct yaffs_block_info *bi = NULL;
+
+ if (dev->internal_start_block <= blk &&
+ blk <= dev->internal_end_block)
+ bi = yaffs_get_block_info(dev, blk);
+ if (bi && bi->block_state == YAFFS_BLOCK_STATE_EMPTY)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ }
+ kfree(dev->checkpt_block_list);
+ dev->checkpt_block_list = NULL;
+ }
+
+ dev->n_free_chunks -=
+ dev->blocks_in_checkpt * dev->param.chunks_per_block;
+ dev->n_erased_blocks -= dev->blocks_in_checkpt;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT, "checkpoint byte count %d",
+ dev->checkpt_byte_count);
+
+ if (dev->checkpt_buffer) {
+ /* free the buffer */
+ kfree(dev->checkpt_buffer);
+ dev->checkpt_buffer = NULL;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev)
+{
+ /* Erase the checkpoint data */
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "checkpoint invalidate of %d blocks",
+ dev->blocks_in_checkpt);
+
+ return yaffs_checkpt_erase(dev);
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.h
new file mode 100644
index 000000000..cdbaba715
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_checkptrw.h
@@ -0,0 +1,33 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_CHECKPTRW_H__
+#define __YAFFS_CHECKPTRW_H__
+
+#include "yaffs_guts.h"
+
+int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing);
+
+int yaffs2_checkpt_wr(struct yaffs_dev *dev, const void *data, int n_bytes);
+
+int yaffs2_checkpt_rd(struct yaffs_dev *dev, void *data, int n_bytes);
+
+int yaffs2_get_checkpt_sum(struct yaffs_dev *dev, u32 * sum);
+
+int yaffs_checkpt_close(struct yaffs_dev *dev);
+
+int yaffs2_checkpt_invalidate_stream(struct yaffs_dev *dev);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.c
new file mode 100644
index 000000000..9294107c1
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.c
@@ -0,0 +1,281 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data. Thus, two
+ * such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#include "yportenv.h"
+
+#include "yaffs_ecc.h"
+
+/* Table generated by gen-ecc.c
+ * Using a table means we do not have to calculate p1..p4 and p1'..p4'
+ * for each byte of data. These are instead provided in a table in bits 7..2.
+ * Bit 0 of each entry indicates whether the entry has an odd or even parity,
+ * and therefore this byte's influence on the line parity.
+ */
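+/*
+ * In yaffs_ecc_calc() below the table entry for each data byte is XORed into
+ * col_parity (the column parities live in bits 7..2), while bit 0 of the
+ * entry decides whether the byte's index is folded into line_parity and its
+ * complement into line_parity_prime.
+ */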
+
+static const unsigned char column_parity_table[] = {
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
+ 0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
+ 0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
+ 0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
+ 0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
+ 0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
+ 0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
+ 0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
+ 0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
+ 0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
+ 0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
+ 0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
+ 0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
+ 0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
+ 0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
+ 0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
+};
+
+
+/* Calculate the ECC for a 256-byte block of data */
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned char line_parity = 0;
+ unsigned char line_parity_prime = 0;
+ unsigned char t;
+ unsigned char b;
+
+ for (i = 0; i < 256; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) { /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+ }
+
+ ecc[2] = (~col_parity) | 0x03;
+
+ t = 0;
+ if (line_parity & 0x80)
+ t |= 0x80;
+ if (line_parity_prime & 0x80)
+ t |= 0x40;
+ if (line_parity & 0x40)
+ t |= 0x20;
+ if (line_parity_prime & 0x40)
+ t |= 0x10;
+ if (line_parity & 0x20)
+ t |= 0x08;
+ if (line_parity_prime & 0x20)
+ t |= 0x04;
+ if (line_parity & 0x10)
+ t |= 0x02;
+ if (line_parity_prime & 0x10)
+ t |= 0x01;
+ ecc[1] = ~t;
+
+ t = 0;
+ if (line_parity & 0x08)
+ t |= 0x80;
+ if (line_parity_prime & 0x08)
+ t |= 0x40;
+ if (line_parity & 0x04)
+ t |= 0x20;
+ if (line_parity_prime & 0x04)
+ t |= 0x10;
+ if (line_parity & 0x02)
+ t |= 0x08;
+ if (line_parity_prime & 0x02)
+ t |= 0x04;
+ if (line_parity & 0x01)
+ t |= 0x02;
+ if (line_parity_prime & 0x01)
+ t |= 0x01;
+ ecc[0] = ~t;
+
+}
+
+/* Correct the ECC on a 256 byte block of data */
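+/*
+ * Returns 0 for no error, 1 for a corrected single-bit error (in the data or
+ * in the ECC itself) and -1 for an unrecoverable error.  For the single-bit
+ * case the faulty byte index is rebuilt from the d1/d0 deltas and the bit
+ * index from d2.
+ */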
+
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc)
+{
+ unsigned char d0, d1, d2; /* deltas */
+
+ d0 = read_ecc[0] ^ test_ecc[0];
+ d1 = read_ecc[1] ^ test_ecc[1];
+ d2 = read_ecc[2] ^ test_ecc[2];
+
+ if ((d0 | d1 | d2) == 0)
+ return 0; /* no error */
+
+ if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
+ ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
+ ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
+ /* Single bit (recoverable) error in data */
+
+ unsigned byte;
+ unsigned bit;
+
+ bit = byte = 0;
+
+ if (d1 & 0x80)
+ byte |= 0x80;
+ if (d1 & 0x20)
+ byte |= 0x40;
+ if (d1 & 0x08)
+ byte |= 0x20;
+ if (d1 & 0x02)
+ byte |= 0x10;
+ if (d0 & 0x80)
+ byte |= 0x08;
+ if (d0 & 0x20)
+ byte |= 0x04;
+ if (d0 & 0x08)
+ byte |= 0x02;
+ if (d0 & 0x02)
+ byte |= 0x01;
+
+ if (d2 & 0x80)
+ bit |= 0x04;
+ if (d2 & 0x20)
+ bit |= 0x02;
+ if (d2 & 0x08)
+ bit |= 0x01;
+
+ data[byte] ^= (1 << bit);
+
+ return 1; /* Corrected the error */
+ }
+
+ if ((hweight8(d0) + hweight8(d1) + hweight8(d2)) == 1) {
+ /* Recoverable error in ecc */
+
+ read_ecc[0] = test_ecc[0];
+ read_ecc[1] = test_ecc[1];
+ read_ecc[2] = test_ecc[2];
+
+ return 1; /* Corrected the error */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+
+}
+
+/*
+ * ECCxxxOther does ECC calcs on arbitrary n bytes of data
+ */
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc_other)
+{
+ unsigned int i;
+ unsigned char col_parity = 0;
+ unsigned line_parity = 0;
+ unsigned line_parity_prime = 0;
+ unsigned char b;
+
+ for (i = 0; i < n_bytes; i++) {
+ b = column_parity_table[*data++];
+ col_parity ^= b;
+
+ if (b & 0x01) {
+ /* odd number of bits in the byte */
+ line_parity ^= i;
+ line_parity_prime ^= ~i;
+ }
+
+ }
+
+ ecc_other->col_parity = (col_parity >> 2) & 0x3f;
+ ecc_other->line_parity = line_parity;
+ ecc_other->line_parity_prime = line_parity_prime;
+}
+
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc)
+{
+ unsigned char delta_col; /* column parity delta */
+ unsigned delta_line; /* line parity delta */
+ unsigned delta_line_prime; /* line parity delta */
+ unsigned bit;
+
+ delta_col = read_ecc->col_parity ^ test_ecc->col_parity;
+ delta_line = read_ecc->line_parity ^ test_ecc->line_parity;
+ delta_line_prime =
+ read_ecc->line_parity_prime ^ test_ecc->line_parity_prime;
+
+ if ((delta_col | delta_line | delta_line_prime) == 0)
+ return 0; /* no error */
+
+ if (delta_line == ~delta_line_prime &&
+ (((delta_col ^ (delta_col >> 1)) & 0x15) == 0x15)) {
+ /* Single bit (recoverable) error in data */
+
+ bit = 0;
+
+ if (delta_col & 0x20)
+ bit |= 0x04;
+ if (delta_col & 0x08)
+ bit |= 0x02;
+ if (delta_col & 0x02)
+ bit |= 0x01;
+
+ if (delta_line >= n_bytes)
+ return -1;
+
+ data[delta_line] ^= (1 << bit);
+
+ return 1; /* corrected */
+ }
+
+ if ((hweight32(delta_line) +
+ hweight32(delta_line_prime) +
+ hweight8(delta_col)) == 1) {
+ /* Recoverable error in ecc */
+
+ *read_ecc = *test_ecc;
+ return 1; /* corrected */
+ }
+
+ /* Unrecoverable error */
+
+ return -1;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.h
new file mode 100644
index 000000000..17d47bd80
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_ecc.h
@@ -0,0 +1,44 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * This code implements the ECC algorithm used in SmartMedia.
+ *
+ * The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
+ * The two unused bits are set to 1.
+ * The ECC can correct single bit errors in a 256-byte page of data.
+ * Thus, two such ECC blocks are used on a 512-byte NAND page.
+ *
+ */
+
+#ifndef __YAFFS_ECC_H__
+#define __YAFFS_ECC_H__
+
+struct yaffs_ecc_other {
+ unsigned char col_parity;
+ unsigned line_parity;
+ unsigned line_parity_prime;
+};
+
+void yaffs_ecc_calc(const unsigned char *data, unsigned char *ecc);
+int yaffs_ecc_correct(unsigned char *data, unsigned char *read_ecc,
+ const unsigned char *test_ecc);
+
+void yaffs_ecc_calc_other(const unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *ecc);
+int yaffs_ecc_correct_other(unsigned char *data, unsigned n_bytes,
+ struct yaffs_ecc_other *read_ecc,
+ const struct yaffs_ecc_other *test_ecc);
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_error.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_error.c
new file mode 100644
index 000000000..11b75f7a0
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_error.c
@@ -0,0 +1,58 @@
+/*
+ * YAFFS: Yet another FFS. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Timothy Manning <timothy@yaffs.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffsfs.h"
+
+struct error_entry {
+ int code;
+ const char *text;
+};
+
+static const struct error_entry error_list[] = {
+ { ENOMEM , "ENOMEM" },
+ { EBUSY , "EBUSY"},
+ { ENODEV , "ENODEV"},
+ { EINVAL , "EINVAL"},
+ { EBADF , "EBADF"},
+ { EACCES , "EACCES"},
+ { EXDEV , "EXDEV" },
+ { ENOENT , "ENOENT"},
+ { ENOSPC , "ENOSPC"},
+ { ERANGE , "ERANGE"},
+ { ENODATA, "ENODATA"},
+ { ENOTEMPTY, "ENOTEMPTY"},
+ { ENAMETOOLONG, "ENAMETOOLONG"},
+ { ENOMEM , "ENOMEM"},
+ { EEXIST , "EEXIST"},
+ { ENOTDIR , "ENOTDIR"},
+ { EISDIR , "EISDIR"},
+ { ENFILE, "ENFILE"},
+ { EROFS, "EROFS"},
+ { EFAULT, "EFAULT"},
+ { 0, NULL }
+};
+
+const char *yaffs_error_to_str(int err)
+{
+ const struct error_entry *e = error_list;
+
+ if (err < 0)
+ err = -err;
+
+ while (e->code && e->text) {
+ if (err == e->code)
+ return e->text;
+ e++;
+ }
+ return "Unknown error code";
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_flashif.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_flashif.h
new file mode 100644
index 000000000..e6e8979ee
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_flashif.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_FLASH_H__
+#define __YAFFS_FLASH_H__
+
+
+#include "yaffs_guts.h"
+int yflash_EraseBlockInNAND(struct yaffs_dev *dev, int blockNumber);
+int yflash_WriteChunkToNAND(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, const struct yaffs_spare *spare);
+int yflash_WriteChunkWithTagsToNAND(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *tags);
+int yflash_ReadChunkFromNAND(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, struct yaffs_spare *spare);
+int yflash_ReadChunkWithTagsFromNAND(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags);
+int yflash_InitialiseNAND(struct yaffs_dev *dev);
+int yflash_MarkNANDBlockBad(struct yaffs_dev *dev, int block_no);
+int yflash_QueryNANDBlock(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 *seq_number);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_flashif2.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_flashif2.h
new file mode 100644
index 000000000..cfdbde9dc
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_flashif2.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_FLASH2_H__
+#define __YAFFS_FLASH2_H__
+
+
+#include "yaffs_guts.h"
+int yflash2_EraseBlockInNAND(struct yaffs_dev *dev, int blockNumber);
+int yflash2_WriteChunkToNAND(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, const struct yaffs_spare *spare);
+int yflash2_WriteChunkWithTagsToNAND(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *tags);
+int yflash2_ReadChunkFromNAND(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, struct yaffs_spare *spare);
+int yflash2_ReadChunkWithTagsFromNAND(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags);
+int yflash2_InitialiseNAND(struct yaffs_dev *dev);
+int yflash2_MarkNANDBlockBad(struct yaffs_dev *dev, int block_no);
+int yflash2_QueryNANDBlock(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 *seq_number);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_getblockinfo.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_getblockinfo.h
new file mode 100644
index 000000000..8fd0802bd
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_getblockinfo.h
@@ -0,0 +1,35 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GETBLOCKINFO_H__
+#define __YAFFS_GETBLOCKINFO_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+
+/* Function to manipulate block info */
+static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
+ *dev, int blk)
+{
+ if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs: get_block_info block %d is not valid",
+ blk);
+ BUG();
+ }
+ return &dev->block_info[blk - dev->internal_start_block];
+}
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_guts.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_guts.c
new file mode 100644
index 000000000..21441fd99
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_guts.c
@@ -0,0 +1,5021 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_guts.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_nand.h"
+#include "yaffs_yaffs1.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_verify.h"
+#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nameval.h"
+#include "yaffs_allocator.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */
+#define YAFFS_GC_GOOD_ENOUGH 2
+#define YAFFS_GC_PASSIVE_THRESHOLD 4
+
+#include "yaffs_ecc.h"
+
+/* Forward declarations */
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve);
+
+
+
+/* Function to calculate chunk and offset */
+
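+/*
+ * For a power-of-two chunk size this is just a shift and a mask (e.g. a
+ * 2048-byte chunk would give chunk_shift == 11); the chunk_div path below
+ * handles chunk sizes that are not a power of two.
+ */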
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out)
+{
+ int chunk;
+ u32 offset;
+
+ chunk = (u32) (addr >> dev->chunk_shift);
+
+ if (dev->chunk_div == 1) {
+ /* easy power of 2 case */
+ offset = (u32) (addr & dev->chunk_mask);
+ } else {
+ /* Non power-of-2 case */
+
+ loff_t chunk_base;
+
+ chunk /= dev->chunk_div;
+
+ chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk;
+ offset = (u32) (addr - chunk_base);
+ }
+
+ *chunk_out = chunk;
+ *offset_out = offset;
+}
+
+/* Function to return the number of shifts for a power of 2 greater than or
+ * equal to the given number
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
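+/* e.g. calc_shifts_ceiling(1000) returns 10, since 2^10 = 1024 >= 1000. */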
+
+static inline u32 calc_shifts_ceiling(u32 x)
+{
+ int extra_bits;
+ int shifts;
+
+ shifts = extra_bits = 0;
+
+ while (x > 1) {
+ if (x & 1)
+ extra_bits++;
+ x >>= 1;
+ shifts++;
+ }
+
+ if (extra_bits)
+ shifts++;
+
+ return shifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0
+ */
+
+static inline u32 calc_shifts(u32 x)
+{
+ u32 shifts;
+
+ shifts = 0;
+
+ if (!x)
+ return 0;
+
+ while (!(x & 1)) {
+ x >>= 1;
+ shifts++;
+ }
+
+ return shifts;
+}
+
+/*
+ * Temporary buffer manipulations.
+ */
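+/*
+ * A small pool of YAFFS_N_TEMP_BUFFERS chunk-sized buffers is set up at init;
+ * yaffs_get_temp_buffer() hands out the first free one and falls back to an
+ * "unmanaged" kmalloc (counted in unmanaged_buffer_allocs) when the pool is
+ * exhausted.
+ */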
+
+static int yaffs_init_tmp_buffers(struct yaffs_dev *dev)
+{
+ int i;
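+ /* buf starts non-NULL so the loop runs; it then tracks each kmalloc result */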
+ u8 *buf = (u8 *) 1;
+
+ memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer));
+
+ for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) {
+ dev->temp_buffer[i].in_use = 0;
+ buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ dev->temp_buffer[i].buffer = buf;
+ }
+
+ return buf ? YAFFS_OK : YAFFS_FAIL;
+}
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev * dev)
+{
+ int i;
+
+ dev->temp_in_use++;
+ if (dev->temp_in_use > dev->max_temp)
+ dev->max_temp = dev->temp_in_use;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].in_use == 0) {
+ dev->temp_buffer[i].in_use = 1;
+ return dev->temp_buffer[i].buffer;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
+ /*
+ * If we got here then we have to allocate an unmanaged one.
+ * This is not good.
+ */
+
+ dev->unmanaged_buffer_allocs++;
+ return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
+
+}
+
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
+{
+ int i;
+
+ dev->temp_in_use--;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer) {
+ dev->temp_buffer[i].in_use = 0;
+ return;
+ }
+ }
+
+ if (buffer) {
+ /* assume it is an unmanaged one. */
+ yaffs_trace(YAFFS_TRACE_BUFFERS,
+ "Releasing unmanaged temp buffer");
+ kfree(buffer);
+ dev->unmanaged_buffer_deallocs++;
+ }
+
+}
+
+/*
+ * Determine if we have a managed buffer.
+ */
+int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 *buffer)
+{
+ int i;
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
+ if (dev->temp_buffer[i].buffer == buffer)
+ return 1;
+ }
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].data == buffer)
+ return 1;
+ }
+
+ if (buffer == dev->checkpt_buffer)
+ return 1;
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: unmaged buffer detected.");
+ return 0;
+}
+
+/*
+ * Functions for robustisizing TODO
+ *
+ */
+
+static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ data = data;
+ tags = tags;
+}
+
+static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
+ const struct yaffs_ext_tags *tags)
+{
+ dev = dev;
+ nand_chunk = nand_chunk;
+ tags = tags;
+}
+
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+ if (!bi->gc_prioritise) {
+ bi->gc_prioritise = 1;
+ dev->has_pending_prioritised_gc = 1;
+ bi->chunk_error_strikes++;
+
+ if (bi->chunk_error_strikes > 3) {
+ bi->needs_retiring = 1; /* Too many strikes, so retire */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Block struck out");
+
+ }
+ }
+}
+
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+ int erased_ok)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs_handle_chunk_error(dev, bi);
+
+ if (erased_ok) {
+ /* Was an actual write failure,
+ * so mark the block for retirement.*/
+ bi->needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d needs retiring", flash_block);
+ }
+
+ /* Delete the chunk */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+}
+
+/*
+ * Verification code
+ */
+
+/*
+ * Simple hash function. Needs to have a reasonable spread
+ */
+
+static inline int yaffs_hash_fn(int n)
+{
+ if (n < 0)
+ n = -n;
+ return n % YAFFS_NOBJECT_BUCKETS;
+}
+
+/*
+ * Access functions to useful fake objects.
+ * Note that root might have a presence in NAND if permissions are set.
+ */
+
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev)
+{
+ return dev->root_dir;
+}
+
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev)
+{
+ return dev->lost_n_found;
+}
+
+/*
+ * Erased NAND checking functions
+ */
+
+int yaffs_check_ff(u8 *buffer, int n_bytes)
+{
+ /* Horrible, slow implementation */
+ while (n_bytes--) {
+ if (*buffer != 0xff)
+ return 0;
+ buffer++;
+ }
+ return 1;
+}
+
+static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk)
+{
+ int retval = YAFFS_OK;
+ u8 *data = yaffs_get_temp_buffer(dev);
+ struct yaffs_ext_tags tags;
+
+ yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+
+ if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
+ if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) ||
+ tags.chunk_used) {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not erased", nand_chunk);
+ retval = YAFFS_FAIL;
+ }
+
+ yaffs_release_temp_buffer(dev, data);
+
+ return retval;
+
+}
+
+static int yaffs_verify_chunk_written(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data,
+ struct yaffs_ext_tags *tags)
+{
+ int retval = YAFFS_OK;
+ struct yaffs_ext_tags temp_tags;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
+ temp_tags.obj_id != tags->obj_id ||
+ temp_tags.chunk_id != tags->chunk_id ||
+ temp_tags.n_bytes != tags->n_bytes)
+ retval = YAFFS_FAIL;
+
+ yaffs_release_temp_buffer(dev, buffer);
+
+ return retval;
+}
+
+
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks)
+{
+ int reserved_chunks;
+ int reserved_blocks = dev->param.n_reserved_blocks;
+ int checkpt_blocks;
+
+ checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev);
+
+ reserved_chunks =
+ (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block;
+
+ return (dev->n_free_chunks > (reserved_chunks + n_chunks));
+}
+
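+/*
+ * Pick the next block to allocate from: scan round-robin from
+ * alloc_block_finder for an EMPTY block, stamp it with a new sequence number
+ * and hand it back, or return -1 if no erased block is left.
+ */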
+static int yaffs_find_alloc_block(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_block_info *bi;
+
+ if (dev->n_erased_blocks < 1) {
+ /* Hoosterman we've got a problem.
+ * Can't get space to gc
+ */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no more erased blocks");
+
+ return -1;
+ }
+
+ /* Find an empty block. */
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ dev->alloc_block_finder++;
+ if (dev->alloc_block_finder < dev->internal_start_block
+ || dev->alloc_block_finder > dev->internal_end_block) {
+ dev->alloc_block_finder = dev->internal_start_block;
+ }
+
+ bi = yaffs_get_block_info(dev, dev->alloc_block_finder);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->seq_number++;
+ bi->seq_number = dev->seq_number;
+ dev->n_erased_blocks--;
+ yaffs_trace(YAFFS_TRACE_ALLOCATE,
+ "Allocated block %d, seq %d, %d left" ,
+ dev->alloc_block_finder, dev->seq_number,
+ dev->n_erased_blocks);
+ return dev->alloc_block_finder;
+ }
+ }
+
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs tragedy: no more erased blocks, but there should have been %d",
+ dev->n_erased_blocks);
+
+ return -1;
+}
+
+static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver,
+ struct yaffs_block_info **block_ptr)
+{
+ int ret_val;
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block < 0) {
+ /* Get next block to allocate off */
+ dev->alloc_block = yaffs_find_alloc_block(dev);
+ dev->alloc_page = 0;
+ }
+
+ if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) {
+ /* No space unless we're allowed to use the reserve. */
+ return -1;
+ }
+
+ if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ && dev->alloc_page == 0)
+ yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
+
+ /* Next page please.... */
+ if (dev->alloc_block >= 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+
+ ret_val = (dev->alloc_block * dev->param.chunks_per_block) +
+ dev->alloc_page;
+ bi->pages_in_use++;
+ yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page);
+
+ dev->alloc_page++;
+
+ dev->n_free_chunks--;
+
+ /* If the block is full set the state to full */
+ if (dev->alloc_page >= dev->param.chunks_per_block) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ if (block_ptr)
+ *block_ptr = bi;
+
+ return ret_val;
+ }
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!");
+
+ return -1;
+}
+
+static int yaffs_get_erased_chunks(struct yaffs_dev *dev)
+{
+ int n;
+
+ n = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ if (dev->alloc_block > 0)
+ n += (dev->param.chunks_per_block - dev->alloc_page);
+
+ return n;
+
+}
+
+/*
+ * yaffs_skip_rest_of_block() skips over the rest of the allocation block
+ * if we don't want to write to it.
+ */
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev)
+{
+ struct yaffs_block_info *bi;
+
+ if (dev->alloc_block > 0) {
+ bi = yaffs_get_block_info(dev, dev->alloc_block);
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+ }
+}
+
+static int yaffs_write_new_chunk(struct yaffs_dev *dev,
+ const u8 *data,
+ struct yaffs_ext_tags *tags, int use_reserver)
+{
+ int attempts = 0;
+ int write_ok = 0;
+ int chunk;
+
+ yaffs2_checkpt_invalidate(dev);
+
+ do {
+ struct yaffs_block_info *bi = 0;
+ int erased_ok = 0;
+
+ chunk = yaffs_alloc_chunk(dev, use_reserver, &bi);
+ if (chunk < 0) {
+ /* no space */
+ break;
+ }
+
+ /* First check this chunk is erased, if it needs
+ * checking. The checking policy (unless forced
+ * always on) is as follows:
+ *
+ * Check the first page we try to write in a block.
+ * If the check passes then we don't need to check any
+ * more. If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc,
+ * then we think there might be something odd about
+ * this block and stop using it.
+ *
+ * Rationale: We should only ever see chunks that have
+ * not been erased if there was a partially written
+ * chunk due to power loss. This checking policy should
+ * catch that case with very few checks and thus save a
+ * lot of checks that are most likely not needed.
+ *
+ * Mods to the above
+ * If an erase check fails or the write fails we skip the
+ * rest of the block.
+ */
+
+ /* let's give it a try */
+ attempts++;
+
+ if (dev->param.always_check_erased)
+ bi->skip_erased_check = 0;
+
+ if (!bi->skip_erased_check) {
+ erased_ok = yaffs_check_chunk_erased(dev, chunk);
+ if (erased_ok != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d was not erased",
+ chunk);
+
+ /* If not erased, delete this one,
+ * skip rest of block and
+ * try another chunk */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ yaffs_skip_rest_of_block(dev);
+ continue;
+ }
+ }
+
+ write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags);
+
+ if (!bi->skip_erased_check)
+ write_ok =
+ yaffs_verify_chunk_written(dev, chunk, data, tags);
+
+ if (write_ok != YAFFS_OK) {
+ /* Clean up aborted write, skip to next block and
+ * try another chunk */
+ yaffs_handle_chunk_wr_error(dev, chunk, erased_ok);
+ continue;
+ }
+
+ bi->skip_erased_check = 1;
+
+ /* Copy the data into the robustification buffer */
+ yaffs_handle_chunk_wr_ok(dev, chunk, data, tags);
+
+ } while (write_ok != YAFFS_OK &&
+ (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts));
+
+ if (!write_ok)
+ chunk = -1;
+
+ if (attempts > 1) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs write required %d attempts",
+ attempts);
+ dev->n_retried_writes += (attempts - 1);
+ }
+
+ return chunk;
+}
+
+/*
+ * Block retiring for handling a broken block.
+ */
+
+static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
+
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) {
+ if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to mark bad and erase block %d",
+ flash_block);
+ } else {
+ struct yaffs_ext_tags tags;
+ int chunk_id =
+ flash_block * dev->param.chunks_per_block;
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ memset(&tags, 0, sizeof(tags));
+ tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK;
+ if (dev->param.write_chunk_tags_fn(dev, chunk_id -
+ dev->chunk_offset,
+ buffer,
+ &tags) != YAFFS_OK)
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Failed to write bad block marker to block %d",
+ flash_block);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+ }
+
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+ bi->gc_prioritise = 0;
+ bi->needs_retiring = 0;
+
+ dev->n_retired_blocks++;
+}
+
+/*---------------- Name handling functions ------------*/
+
+static u16 yaffs_calc_name_sum(const YCHAR *name)
+{
+ u16 sum = 0;
+ u16 i = 1;
+
+ if (!name)
+ return 0;
+
+ while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) {
+
+ /* 0x1f mask is case insensitive */
+ sum += ((*name) & 0x1f) * i;
+ i++;
+ name++;
+ }
+ return sum;
+}
+
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name)
+{
+ memset(obj->short_name, 0, sizeof(obj->short_name));
+ if (name &&
+ yaffs_strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <=
+ YAFFS_SHORT_NAME_LENGTH)
+ yaffs_strcpy(obj->short_name, name);
+ else
+ obj->short_name[0] = _Y('\0');
+ obj->sum = yaffs_calc_name_sum(name);
+}
+
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1];
+ memset(tmp_name, 0, sizeof(tmp_name));
+ yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ yaffs_set_obj_name(obj, tmp_name);
+#else
+ yaffs_set_obj_name(obj, oh->name);
+#endif
+}
+
+loff_t yaffs_max_file_size(struct yaffs_dev *dev)
+{
+ return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk;
+}
+
+/*-------------------- TNODES -------------------
+
+ * List of spare tnodes
+ * The list is hooked together using the first pointer
+ * in the tnode.
+ */
+
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev)
+{
+ struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev);
+
+ if (tn) {
+ memset(tn, 0, dev->tnode_size);
+ dev->n_tnodes++;
+ }
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return tn;
+}
+
+/* FreeTnode frees up a tnode and puts it back on the free list */
+static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn)
+{
+ yaffs_free_raw_tnode(dev, tn);
+ dev->n_tnodes--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ yaffs_deinit_raw_tnodes_and_objs(dev);
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+}
+
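+/*
+ * Level 0 tnodes hold a packed array of tnode_width-bit entries on top of an
+ * array of 32-bit words, so an entry may straddle a word boundary; the
+ * load/get routines below handle the spill into the next word.  Stored values
+ * are right-shifted by chunk_grp_bits.
+ */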
+void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos, unsigned val)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 mask;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+ val >>= dev->chunk_grp_bits;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ mask = dev->tnode_mask << bit_in_word;
+
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val << bit_in_word));
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ mask =
+ dev->tnode_mask >> bit_in_word;
+ map[word_in_map] &= ~mask;
+ map[word_in_map] |= (mask & (val >> bit_in_word));
+ }
+}
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos)
+{
+ u32 *map = (u32 *) tn;
+ u32 bit_in_map;
+ u32 bit_in_word;
+ u32 word_in_map;
+ u32 val;
+
+ pos &= YAFFS_TNODES_LEVEL0_MASK;
+
+ bit_in_map = pos * dev->tnode_width;
+ word_in_map = bit_in_map / 32;
+ bit_in_word = bit_in_map & (32 - 1);
+
+ val = map[word_in_map] >> bit_in_word;
+
+ if (dev->tnode_width > (32 - bit_in_word)) {
+ bit_in_word = (32 - bit_in_word);
+ word_in_map++;
+ val |= (map[word_in_map] << bit_in_word);
+ }
+
+ val &= dev->tnode_mask;
+ val <<= dev->chunk_grp_bits;
+
+ return val;
+}
+
+/* ------------------- End of individual tnode manipulation -----------------*/
+
+/* ---------Functions to manipulate the look-up tree (made up of tnodes) ------
+ * The look up tree is represented by the top tnode and the number of top_level
+ * in the tree. 0 means only the level 0 tnode is in the tree.
+ */
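+/*
+ * Level 0 consumes YAFFS_TNODES_LEVEL0_BITS of the chunk id and each internal
+ * level a further YAFFS_TNODES_INTERNAL_BITS, so the depth needed for a given
+ * chunk_id is found by shifting it down until nothing remains; that is the
+ * required_depth calculation used by the find/add routines below.
+ */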
+
+/* FindLevel0Tnode finds the level 0 tnode, if one exists. */
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id)
+{
+ struct yaffs_tnode *tn = file_struct->top;
+ u32 i;
+ int required_depth;
+ int level = file_struct->top_level;
+
+ dev = dev;
+
+ /* Check sane level and chunk Id */
+ if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (i) {
+ i >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level)
+ return NULL; /* Not tall enough, so we can't find it */
+
+ /* Traverse down to level 0 */
+ while (level > 0 && tn) {
+ tn = tn->internal[(chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (level - 1) *
+ YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK];
+ level--;
+ }
+
+ return tn;
+}
+
+/* add_find_tnode_0 finds the level 0 tnode if it exists,
+ * otherwise first expands the tree.
+ * This happens in two steps:
+ * 1. If the tree isn't tall enough, then make it taller.
+ * 2. Scan down the tree towards the level 0 tnode adding tnodes if required.
+ *
+ * Used when modifying the tree.
+ *
+ * If the tn argument is NULL, then a fresh tnode will be added otherwise the
+ * specified tn will be plugged into the tree.
+ */
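+/* Sketch of step 1 (growing the tree), assuming a file whose tree is
+ * currently just a level 0 tnode (top_level == 0) and a chunk_id that needs
+ * depth 2:
+ *
+ *   before:  top_level = 0, top -> T0
+ *   pass 1:  new tnode T1, T1->internal[0] = T0, top = T1, top_level = 1
+ *   pass 2:  new tnode T2, T2->internal[0] = T1, top = T2, top_level = 2
+ *
+ * The existing data stays reachable because the old top always becomes
+ * internal[0] of the new top; chunk ids below the old capacity have zero
+ * high-order index bits.
+ */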
+
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn)
+{
+ int required_depth;
+ int i;
+ int l;
+ struct yaffs_tnode *tn;
+ u32 x;
+
+ /* Check sane level and page Id */
+ if (file_struct->top_level < 0 ||
+ file_struct->top_level > YAFFS_TNODES_MAX_LEVEL)
+ return NULL;
+
+ if (chunk_id > YAFFS_MAX_CHUNK_ID)
+ return NULL;
+
+ /* First check we're tall enough (ie enough top_level) */
+
+ x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ if (required_depth > file_struct->top_level) {
+ /* Not tall enough, gotta make the tree taller */
+ for (i = file_struct->top_level; i < required_depth; i++) {
+
+ tn = yaffs_get_tnode(dev);
+
+ if (tn) {
+ tn->internal[0] = file_struct->top;
+ file_struct->top = tn;
+ file_struct->top_level++;
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs: no more tnodes");
+ return NULL;
+ }
+ }
+ }
+
+ /* Traverse down to level 0, adding anything we need */
+
+ l = file_struct->top_level;
+ tn = file_struct->top;
+
+ if (l > 0) {
+ while (l > 0 && tn) {
+ x = (chunk_id >>
+ (YAFFS_TNODES_LEVEL0_BITS +
+ (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) &
+ YAFFS_TNODES_INTERNAL_MASK;
+
+ if ((l > 1) && !tn->internal[x]) {
+ /* Add missing non-level-zero tnode */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ } else if (l == 1) {
+ /* Looking from level 1 at level 0 */
+ if (passed_tn) {
+ /* If we already have one, release it */
+ if (tn->internal[x])
+ yaffs_free_tnode(dev,
+ tn->internal[x]);
+ tn->internal[x] = passed_tn;
+
+ } else if (!tn->internal[x]) {
+ /* Don't have one, none passed in */
+ tn->internal[x] = yaffs_get_tnode(dev);
+ if (!tn->internal[x])
+ return NULL;
+ }
+ }
+
+ tn = tn->internal[x];
+ l--;
+ }
+ } else {
+ /* top is level 0 */
+ if (passed_tn) {
+ memcpy(tn, passed_tn,
+ (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8);
+ yaffs_free_tnode(dev, passed_tn);
+ }
+ }
+
+ return tn;
+}
+
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
+ int chunk_obj)
+{
+ return (tags->chunk_id == chunk_obj &&
+ tags->obj_id == obj_id &&
+ !tags->is_deleted) ? 1 : 0;
+
+}
+
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
+ struct yaffs_ext_tags *tags, int obj_id,
+ int inode_chunk)
+{
+ int j;
+
+ for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
+ if (yaffs_check_chunk_bit
+ (dev, the_chunk / dev->param.chunks_per_block,
+ the_chunk % dev->param.chunks_per_block)) {
+
+ if (dev->chunk_grp_size == 1)
+ return the_chunk;
+ else {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ tags);
+ if (yaffs_tags_match(tags,
+ obj_id, inode_chunk)) {
+ /* found it; */
+ return the_chunk;
+ }
+ }
+ }
+ the_chunk++;
+ }
+ return -1;
+}
+
+static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+ /* Get the tnode, then get the level 0 chunk offset. */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ int ret_val = -1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (!tn)
+ return ret_val;
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+ return ret_val;
+}
+
+static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk,
+ struct yaffs_ext_tags *tags)
+{
+ /* Get the tnode, then get the level 0 chunk offset. */
+ struct yaffs_tnode *tn;
+ int the_chunk = -1;
+ struct yaffs_ext_tags local_tags;
+ struct yaffs_dev *dev = in->my_dev;
+ int ret_val = -1;
+
+ if (!tags) {
+ /* Passed a NULL, so use our own tags space */
+ tags = &local_tags;
+ }
+
+ tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk);
+
+ if (!tn)
+ return ret_val;
+
+ the_chunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id,
+ inode_chunk);
+
+ /* Delete the entry in the filestructure (if found) */
+ if (ret_val != -1)
+ yaffs_load_tnode_0(dev, tn, inode_chunk, 0);
+
+ return ret_val;
+}
+
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan)
+{
+ /* NB in_scan is zero unless scanning.
+ * For forward scanning, in_scan is > 0;
+ * for backward scanning in_scan is < 0
+ *
+ * nand_chunk = 0 is a dummy insert to make sure the tnodes are there.
+ */
+
+ struct yaffs_tnode *tn;
+ struct yaffs_dev *dev = in->my_dev;
+ int existing_cunk;
+ struct yaffs_ext_tags existing_tags;
+ struct yaffs_ext_tags new_tags;
+ unsigned existing_serial, new_serial;
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) {
+ /* Just ignore an attempt at putting a chunk into a non-file
+ * during scanning.
+ * If it is not during scanning then something went wrong!
+ */
+ if (!in_scan) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy:attempt to put data chunk into a non-file"
+ );
+ BUG();
+ }
+
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+
+ tn = yaffs_add_find_tnode_0(dev,
+ &in->variant.file_variant,
+ inode_chunk, NULL);
+ if (!tn)
+ return YAFFS_FAIL;
+
+ if (!nand_chunk)
+ /* Dummy insert, bail now */
+ return YAFFS_OK;
+
+ existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk);
+
+ if (in_scan != 0) {
+ /* If we're scanning then we need to test for duplicates
+ * NB This does not need to be efficient since it should only
+ * happen when the power fails during a write, and then only one
+ * chunk should ever be affected.
+ *
+ * Correction for YAFFS2: This could happen quite a lot and we
+ * need to think about efficiency! TODO
+ * Update: For backward scanning we don't need to re-read tags
+ * so this is quite cheap.
+ */
+
+ if (existing_cunk > 0) {
+ /* NB Right now existing chunk will not be real
+ * chunk_id if the chunk group size > 1
+ * thus we have to do a FindChunkInFile to get the
+ * real chunk id.
+ *
+ * We have a duplicate, so we need to decide which
+ * one to use:
+ *
+ * Backwards scanning YAFFS2: The old one is what
+ * we use, dump the new one.
+ * YAFFS1: Get both sets of tags and compare serial
+ * numbers.
+ */
+
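+ /* Example of the serial comparison below, assuming YAFFS1's
+ * 2-bit serial numbers (hence the "& 3"): if the existing
+ * chunk carries serial 3 and the new chunk carries serial 0,
+ * then ((3 + 1) & 3) == 0 matches, so the new chunk is the
+ * later write and the existing one is deleted.
+ */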
+ if (in_scan > 0) {
+ /* Only do this for forward scanning */
+ yaffs_rd_chunk_tags_nand(dev,
+ nand_chunk,
+ NULL, &new_tags);
+
+ /* Do a proper find */
+ existing_cunk =
+ yaffs_find_chunk_in_file(in, inode_chunk,
+ &existing_tags);
+ }
+
+ if (existing_cunk <= 0) {
+ /*Hoosterman - how did this happen? */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: existing chunk < 0 in scan"
+ );
+
+ }
+
+ /* NB The deleted flags should be false, otherwise
+ * the chunks will not be loaded during a scan
+ */
+
+ if (in_scan > 0) {
+ new_serial = new_tags.serial_number;
+ existing_serial = existing_tags.serial_number;
+ }
+
+ if ((in_scan > 0) &&
+ (existing_cunk <= 0 ||
+ ((existing_serial + 1) & 3) == new_serial)) {
+ /* Forward scanning.
+ * Use new
+ * Delete the old one and drop through to
+ * update the tnode
+ */
+ yaffs_chunk_del(dev, existing_cunk, 1,
+ __LINE__);
+ } else {
+ /* Backward scanning or we want to use the
+ * existing one
+ * Delete the new one and return early so that
+ * the tnode isn't changed
+ */
+ yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
+ return YAFFS_OK;
+ }
+ }
+
+ }
+
+ if (existing_cunk == 0)
+ in->n_data_chunks++;
+
+ yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk);
+
+ return YAFFS_OK;
+}
+
+static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk)
+{
+ struct yaffs_block_info *the_block;
+ unsigned block_no;
+
+ yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk);
+
+ block_no = chunk / dev->param.chunks_per_block;
+ the_block = yaffs_get_block_info(dev, block_no);
+ if (the_block) {
+ the_block->soft_del_pages++;
+ dev->n_free_chunks++;
+ yaffs2_update_oldest_dirty_seq(dev, block_no, the_block);
+ }
+}
+
+/* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all
+ * the chunks in the file.
+ * All soft deleting does is increment the block's softdelete count and pull
+ * the chunk out of the tnode.
+ * Thus, essentially this is the same as DeleteWorker except that the chunks
+ * are soft deleted.
+ */
+
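+/* Accounting sketch: yaffs_soft_del_chunk() bumps soft_del_pages on the
+ * chunk's block and n_free_chunks on the device, so a soft deleted file's
+ * blocks look emptier to the garbage collector (which works on
+ * pages_in_use - soft_del_pages) and are reclaimed in due course.
+ */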
+static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn,
+ u32 level, int chunk_offset)
+{
+ int i;
+ int the_chunk;
+ int all_done = 1;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!tn)
+ return 1;
+
+ if (level > 0) {
+ for (i = YAFFS_NTNODES_INTERNAL - 1;
+ all_done && i >= 0;
+ i--) {
+ if (tn->internal[i]) {
+ all_done =
+ yaffs_soft_del_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS)
+ + i);
+ if (all_done) {
+ yaffs_free_tnode(dev,
+ tn->internal[i]);
+ tn->internal[i] = NULL;
+ } else {
+ /* Can this happen? */
+ }
+ }
+ }
+ return (all_done) ? 1 : 0;
+ }
+
+ /* level 0 */
+ for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) {
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk) {
+ yaffs_soft_del_chunk(dev, the_chunk);
+ yaffs_load_tnode_0(dev, tn, i, 0);
+ }
+ }
+ return 1;
+}
+
+static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_obj *parent;
+
+ yaffs_verify_obj_in_dir(obj);
+ parent = obj->parent;
+
+ yaffs_verify_dir(parent);
+
+ if (dev && dev->param.remove_obj_fn)
+ dev->param.remove_obj_fn(obj);
+
+ list_del_init(&obj->siblings);
+ obj->parent = NULL;
+
+ yaffs_verify_dir(parent);
+}
+
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj)
+{
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a null pointer directory"
+ );
+ BUG();
+ return;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: Trying to add an object to a non-directory"
+ );
+ BUG();
+ }
+
+ if (obj->siblings.prev == NULL) {
+ /* Not initialised */
+ BUG();
+ }
+
+ yaffs_verify_dir(directory);
+
+ yaffs_remove_obj_from_dir(obj);
+
+ /* Now add it */
+ list_add(&obj->siblings, &directory->variant.dir_variant.children);
+ obj->parent = directory;
+
+ if (directory == obj->my_dev->unlinked_dir
+ || directory == obj->my_dev->del_dir) {
+ obj->unlinked = 1;
+ obj->my_dev->n_unlinked_files++;
+ obj->rename_allowed = 0;
+ }
+
+ yaffs_verify_dir(directory);
+ yaffs_verify_obj_in_dir(obj);
+}
+
+static int yaffs_change_obj_name(struct yaffs_obj *obj,
+ struct yaffs_obj *new_dir,
+ const YCHAR *new_name, int force, int shadows)
+{
+ int unlink_op;
+ int del_op;
+ struct yaffs_obj *existing_target;
+
+ if (new_dir == NULL)
+ new_dir = obj->parent; /* use the old directory */
+
+ if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_change_obj_name: new_dir is not a directory"
+ );
+ BUG();
+ }
+
+ unlink_op = (new_dir == obj->my_dev->unlinked_dir);
+ del_op = (new_dir == obj->my_dev->del_dir);
+
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+
+ /* If the object is a file going into the unlinked directory,
+ * then it is OK to just stuff it in since duplicate names are OK.
+ * Otherwise only proceed if the new name does not exist and we're putting
+ * it into a directory.
+ */
+ if (!(unlink_op || del_op || force ||
+ shadows > 0 || !existing_target) ||
+ new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ return YAFFS_FAIL;
+
+ yaffs_set_obj_name(obj, new_name);
+ obj->dirty = 1;
+ yaffs_add_obj_to_dir(new_dir, obj);
+
+ if (unlink_op)
+ obj->unlinked = 1;
+
+ /* If it is a deletion then we mark it as a shrink for gc */
+ if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0)
+ return YAFFS_OK;
+
+ return YAFFS_FAIL;
+}
+
+/*------------------------ Short Operations Cache ------------------------------
+ * In many situations where there is no high level buffering a lot of
+ * reads might be short sequential reads, and a lot of writes may be short
+ * sequential writes. eg. scanning/writing a jpeg file.
+ * In these cases, a short read/write cache can provide a huge performance
+ * benefit with dumb-as-a-rock code.
+ * In Linux, the page cache provides read buffering and the short op cache
+ * provides write buffering.
+ *
+ * There are a small number (~10) of cache chunks per device so that we don't
+ * need a very intelligent search.
+ */
+
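+/* Sketch of how the cache below is used: a short write lands in a
+ * dev->cache[] entry handed out by yaffs_grab_chunk_cache(), gets marked
+ * dirty via yaffs_use_cache(..., is_write = 1), and is eventually written to
+ * NAND by yaffs_flush_file_cache(), which flushes the lowest dirty chunk_id
+ * for the object on each pass so chunks reach flash roughly in order.
+ */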
+static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+ struct yaffs_cache *cache;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ for (i = 0; i < n_caches; i++) {
+ cache = &dev->cache[i];
+ if (cache->object == obj && cache->dirty)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void yaffs_flush_file_cache(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int lowest = -99; /* Stop compiler whining. */
+ int i;
+ struct yaffs_cache *cache;
+ int chunk_written = 0;
+ int n_caches = obj->my_dev->param.n_caches;
+
+ if (n_caches < 1)
+ return;
+ do {
+ cache = NULL;
+
+ /* Find the lowest dirty chunk for this object */
+ for (i = 0; i < n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].dirty) {
+ if (!cache ||
+ dev->cache[i].chunk_id < lowest) {
+ cache = &dev->cache[i];
+ lowest = cache->chunk_id;
+ }
+ }
+ }
+
+ if (cache && !cache->locked) {
+ /* Write it out and free it up */
+ chunk_written =
+ yaffs_wr_data_obj(cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ cache->object = NULL;
+ }
+ } while (cache && chunk_written > 0);
+
+ if (cache)
+ /* Hoosterman, disk full while writing cache out. */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: no space during cache write");
+}
+
+/* yaffs_flush_whole_cache(dev)
+ *
+ * Flush every dirty short-op cache entry, object by object, until none remain.
+ */
+
+void yaffs_flush_whole_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int n_caches = dev->param.n_caches;
+ int i;
+
+ /* Find a dirty object in the cache and flush it...
+ * until there are no further dirty objects.
+ */
+ do {
+ obj = NULL;
+ for (i = 0; i < n_caches && !obj; i++) {
+ if (dev->cache[i].object && dev->cache[i].dirty)
+ obj = dev->cache[i].object;
+ }
+ if (obj)
+ yaffs_flush_file_cache(obj);
+ } while (obj);
+
+}
+
+/* Grab us a cache chunk for use.
+ * First look for an empty one.
+ * Then look for the least recently used non-dirty one.
+ * Then look for the least recently used dirty one, flush it and look again.
+ */
+static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
+{
+ int i;
+
+ if (dev->param.n_caches > 0) {
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (!dev->cache[i].object)
+ return &dev->cache[i];
+ }
+ }
+ return NULL;
+}
+
+static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
+{
+ struct yaffs_cache *cache;
+ struct yaffs_obj *the_obj;
+ int usage;
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ /* Try find a non-dirty one... */
+
+ cache = yaffs_grab_chunk_worker(dev);
+
+ if (!cache) {
+ /* They were all dirty, find the LRU object and flush
+ * its cache, then find again.
+ * NB what's here is not very accurate,
+ * we actually flush the object with the LRU chunk.
+ */
+
+ /* With locking we can't assume we can use entry zero,
+ * so set the_obj to a valid pointer for Coverity. */
+ the_obj = dev->cache[0].object;
+ usage = -1;
+ cache = NULL;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object &&
+ !dev->cache[i].locked &&
+ (dev->cache[i].last_use < usage ||
+ !cache)) {
+ usage = dev->cache[i].last_use;
+ the_obj = dev->cache[i].object;
+ cache = &dev->cache[i];
+ }
+ }
+
+ if (!cache || cache->dirty) {
+ /* Flush and try again */
+ yaffs_flush_file_cache(the_obj);
+ cache = yaffs_grab_chunk_worker(dev);
+ }
+ }
+ return cache;
+}
+
+/* Find a cached chunk */
+static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
+ int chunk_id)
+{
+ struct yaffs_dev *dev = obj->my_dev;
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return NULL;
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == obj &&
+ dev->cache[i].chunk_id == chunk_id) {
+ dev->cache_hits++;
+
+ return &dev->cache[i];
+ }
+ }
+ return NULL;
+}
+
+/* Mark the chunk for the least recently used algorithm. */
+static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
+ int is_write)
+{
+ int i;
+
+ if (dev->param.n_caches < 1)
+ return;
+
+ if (dev->cache_last_use < 0 ||
+ dev->cache_last_use > 100000000) {
+ /* Reset the cache usages */
+ for (i = 1; i < dev->param.n_caches; i++)
+ dev->cache[i].last_use = 0;
+
+ dev->cache_last_use = 0;
+ }
+ dev->cache_last_use++;
+ cache->last_use = dev->cache_last_use;
+
+ if (is_write)
+ cache->dirty = 1;
+}
+
+/* Invalidate a single cache page.
+ * Do this when a whole page gets written,
+ * ie the short cache for this page is no longer valid.
+ */
+static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
+{
+ struct yaffs_cache *cache;
+
+ if (object->my_dev->param.n_caches > 0) {
+ cache = yaffs_find_chunk_cache(object, chunk_id);
+
+ if (cache)
+ cache->object = NULL;
+ }
+}
+
+/* Invalidate all the cache pages associated with this object
+ * Do this whenever the file is deleted or resized.
+ */
+static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.n_caches > 0) {
+ /* Invalidate it. */
+ for (i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].object == in)
+ dev->cache[i].object = NULL;
+ }
+ }
+}
+
+static void yaffs_unhash_obj(struct yaffs_obj *obj)
+{
+ int bucket;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ /* If it is still linked into the bucket list, free from the list */
+ if (!list_empty(&obj->hash_link)) {
+ list_del_init(&obj->hash_link);
+ bucket = yaffs_hash_fn(obj->obj_id);
+ dev->obj_bucket[bucket].count--;
+ }
+}
+
+/* FreeObject frees up a Object and puts it back on the free list */
+static void yaffs_free_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj) {
+ BUG();
+ return;
+ }
+ dev = obj->my_dev;
+ yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
+ obj, obj->my_inode);
+ if (obj->parent)
+ BUG();
+ if (!list_empty(&obj->siblings))
+ BUG();
+
+ if (obj->my_inode) {
+ /* We're still hooked up to a cached inode.
+ * Don't delete now, but mark for later deletion
+ */
+ obj->defered_free = 1;
+ return;
+ }
+
+ yaffs_unhash_obj(obj);
+
+ yaffs_free_raw_obj(dev, obj);
+ dev->n_obj--;
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+}
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj)
+{
+ if (obj->defered_free)
+ yaffs_free_obj(obj);
+}
+
+static int yaffs_generic_obj_del(struct yaffs_obj *in)
+{
+ /* Invalidate the file's data in the cache, without flushing. */
+ yaffs_invalidate_whole_cache(in);
+
+ if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
+ /* Move to unlinked directory so we have a deletion record */
+ yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
+ 0);
+ }
+
+ yaffs_remove_obj_from_dir(in);
+ yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
+ in->hdr_chunk = 0;
+
+ yaffs_free_obj(in);
+ return YAFFS_OK;
+
+}
+
+static void yaffs_soft_del_file(struct yaffs_obj *obj)
+{
+ if (!obj->deleted ||
+ obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
+ obj->soft_del)
+ return;
+
+ if (obj->n_data_chunks <= 0) {
+ /* Empty file with no duplicate object headers,
+ * just delete it immediately */
+ yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
+ obj->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: Deleting empty file %d",
+ obj->obj_id);
+ yaffs_generic_obj_del(obj);
+ } else {
+ yaffs_soft_del_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.
+ file_variant.top_level, 0);
+ obj->soft_del = 1;
+ }
+}
+
+/* Pruning removes any part of the file structure tree that is beyond the
+ * bounds of the file (ie that does not point to chunks).
+ *
+ * A file should only get pruned when its size is reduced.
+ *
+ * Before pruning, the chunks must be pulled from the tree and the
+ * level 0 tnode entries must be zeroed out.
+ * Could also use this for file deletion, but that's probably better handled
+ * by a special case.
+ *
+ * This function is recursive. For levels > 0 the function is called again on
+ * any sub-tree. For level == 0 we just check if the sub-tree has data.
+ * If there is no data in a subtree then it is pruned.
+ */
+
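+/* Example, assuming the usual 16 level-0 slots: a file tree with
+ * top_level == 2 is truncated so that only chunk ids 0..15 remain.
+ * yaffs_prune_worker() frees every emptied subtree, leaving only the
+ * internal[0] chain, and the loop in yaffs_prune_tree() then drops the two
+ * now-redundant top tnodes so the file ends up with top_level == 0 again.
+ */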
+static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev,
+ struct yaffs_tnode *tn, u32 level,
+ int del0)
+{
+ int i;
+ int has_data;
+
+ if (!tn)
+ return tn;
+
+ has_data = 0;
+
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i]) {
+ tn->internal[i] =
+ yaffs_prune_worker(dev,
+ tn->internal[i],
+ level - 1,
+ (i == 0) ? del0 : 1);
+ }
+
+ if (tn->internal[i])
+ has_data++;
+ }
+ } else {
+ int tnode_size_u32 = dev->tnode_size / sizeof(u32);
+ u32 *map = (u32 *) tn;
+
+ for (i = 0; !has_data && i < tnode_size_u32; i++) {
+ if (map[i])
+ has_data++;
+ }
+ }
+
+ if (has_data == 0 && del0) {
+ /* Free and return NULL */
+ yaffs_free_tnode(dev, tn);
+ tn = NULL;
+ }
+ return tn;
+}
+
+static int yaffs_prune_tree(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct)
+{
+ int i;
+ int has_data;
+ int done = 0;
+ struct yaffs_tnode *tn;
+
+ if (file_struct->top_level < 1)
+ return YAFFS_OK;
+
+ file_struct->top =
+ yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0);
+
+ /* Now we have a tree with all the empty branches pruned to NULL but
+ * the height is the same as it was.
+ * Let's see if we can trim internal tnodes to shorten the tree.
+ * We can do this if only the 0th element in the top tnode is in use
+ * (ie all the other entries are NULL).
+ */
+
+ while (file_struct->top_level && !done) {
+ tn = file_struct->top;
+
+ has_data = 0;
+ for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) {
+ if (tn->internal[i])
+ has_data++;
+ }
+
+ if (!has_data) {
+ file_struct->top = tn->internal[0];
+ file_struct->top_level--;
+ yaffs_free_tnode(dev, tn);
+ } else {
+ done = 1;
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+/*-------------------- End of File Structure functions.-------------------*/
+
+/* alloc_empty_obj gets us a clean Object.*/
+static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev);
+
+ if (!obj)
+ return obj;
+
+ dev->n_obj++;
+
+ /* Now sweeten it up... */
+
+ memset(obj, 0, sizeof(struct yaffs_obj));
+ obj->being_created = 1;
+
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0;
+ obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN;
+ INIT_LIST_HEAD(&(obj->hard_links));
+ INIT_LIST_HEAD(&(obj->hash_link));
+ INIT_LIST_HEAD(&obj->siblings);
+
+ /* Now make the directory sane */
+ if (dev->root_dir) {
+ obj->parent = dev->root_dir;
+ list_add(&(obj->siblings),
+ &dev->root_dir->variant.dir_variant.children);
+ }
+
+ /* Add it to the lost and found directory.
+ * NB Can't put root or lost-n-found in lost-n-found so
+ * check if lost-n-found exists first
+ */
+ if (dev->lost_n_found)
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+
+ obj->being_created = 0;
+
+ dev->checkpoint_blocks_required = 0; /* force recalculation */
+
+ return obj;
+}
+
+static int yaffs_find_nice_bucket(struct yaffs_dev *dev)
+{
+ int i;
+ int l = 999;
+ int lowest = 999999;
+
+ /* Search for the shortest list or one that
+ * isn't too long.
+ */
+
+ for (i = 0; i < 10 && lowest > 4; i++) {
+ dev->bucket_finder++;
+ dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS;
+ if (dev->obj_bucket[dev->bucket_finder].count < lowest) {
+ lowest = dev->obj_bucket[dev->bucket_finder].count;
+ l = dev->bucket_finder;
+ }
+ }
+
+ return l;
+}
+
+static int yaffs_new_obj_id(struct yaffs_dev *dev)
+{
+ int bucket = yaffs_find_nice_bucket(dev);
+ int found = 0;
+ struct list_head *i;
+ u32 n = (u32) bucket;
+
+ /* Now find an object value that has not already been taken
+ * by scanning the list.
+ */
+
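+ /* Candidate ids are bucket + YAFFS_NOBJECT_BUCKETS,
+ * bucket + 2 * YAFFS_NOBJECT_BUCKETS, and so on; assuming
+ * yaffs_hash_fn() is obj_id modulo YAFFS_NOBJECT_BUCKETS, every
+ * candidate hashes back to this bucket, so only this bucket's
+ * list needs to be scanned for collisions.
+ */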
+ while (!found) {
+ found = 1;
+ n += YAFFS_NOBJECT_BUCKETS;
+ if (1 || dev->obj_bucket[bucket].count > 0) {
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* If there is already one in the list */
+ if (i && list_entry(i, struct yaffs_obj,
+ hash_link)->obj_id == n) {
+ found = 0;
+ }
+ }
+ }
+ }
+ return n;
+}
+
+static void yaffs_hash_obj(struct yaffs_obj *in)
+{
+ int bucket = yaffs_hash_fn(in->obj_id);
+ struct yaffs_dev *dev = in->my_dev;
+
+ list_add(&in->hash_link, &dev->obj_bucket[bucket].list);
+ dev->obj_bucket[bucket].count++;
+}
+
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number)
+{
+ int bucket = yaffs_hash_fn(number);
+ struct list_head *i;
+ struct yaffs_obj *in;
+
+ list_for_each(i, &dev->obj_bucket[bucket].list) {
+ /* Look if it is in the list */
+ in = list_entry(i, struct yaffs_obj, hash_link);
+ if (in->obj_id == number) {
+ /* Don't show if it is defered free */
+ if (in->defered_free)
+ return NULL;
+ return in;
+ }
+ }
+
+ return NULL;
+}
+
+struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+ struct yaffs_tnode *tn = NULL;
+
+ if (number < 0)
+ number = yaffs_new_obj_id(dev);
+
+ if (type == YAFFS_OBJECT_TYPE_FILE) {
+ tn = yaffs_get_tnode(dev);
+ if (!tn)
+ return NULL;
+ }
+
+ the_obj = yaffs_alloc_empty_obj(dev);
+ if (!the_obj) {
+ if (tn)
+ yaffs_free_tnode(dev, tn);
+ return NULL;
+ }
+
+ the_obj->fake = 0;
+ the_obj->rename_allowed = 1;
+ the_obj->unlink_allowed = 1;
+ the_obj->obj_id = number;
+ yaffs_hash_obj(the_obj);
+ the_obj->variant_type = type;
+ yaffs_load_current_time(the_obj, 1, 1);
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ the_obj->variant.file_variant.file_size = 0;
+ the_obj->variant.file_variant.scanned_size = 0;
+ the_obj->variant.file_variant.shrink_size =
+ yaffs_max_file_size(dev);
+ the_obj->variant.file_variant.top_level = 0;
+ the_obj->variant.file_variant.top = tn;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.children);
+ INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* No action required */
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* todo this should not happen */
+ break;
+ }
+ return the_obj;
+}
+
+static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev,
+ int number, u32 mode)
+{
+
+ struct yaffs_obj *obj =
+ yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY);
+
+ if (!obj)
+ return NULL;
+
+ obj->fake = 1; /* it is fake so it might not use NAND */
+ obj->rename_allowed = 0;
+ obj->unlink_allowed = 0;
+ obj->deleted = 0;
+ obj->unlinked = 0;
+ obj->yst_mode = mode;
+ obj->my_dev = dev;
+ obj->hdr_chunk = 0; /* Not a valid chunk. */
+ return obj;
+
+}
+
+
+static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
+{
+ int i;
+
+ dev->n_obj = 0;
+ dev->n_tnodes = 0;
+ yaffs_init_raw_tnodes_and_objs(dev);
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ INIT_LIST_HEAD(&dev->obj_bucket[i].list);
+ dev->obj_bucket[i].count = 0;
+ }
+}
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type)
+{
+ struct yaffs_obj *the_obj = NULL;
+
+ if (number > 0)
+ the_obj = yaffs_find_by_number(dev, number);
+
+ if (!the_obj)
+ the_obj = yaffs_new_obj(dev, number, type);
+
+ return the_obj;
+
+}
+
+YCHAR *yaffs_clone_str(const YCHAR *str)
+{
+ YCHAR *new_str = NULL;
+ int len;
+
+ if (!str)
+ str = _Y("");
+
+ len = yaffs_strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
+ new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
+ if (new_str) {
+ yaffs_strncpy(new_str, str, len);
+ new_str[len] = 0;
+ }
+ return new_str;
+
+}
+/*
+ * yaffs_update_parent() handles fixing a directory's mtime and ctime when a new
+ * link (ie. name) is created or deleted in the directory.
+ *
+ * ie.
+ * create dir/a : update dir's mtime/ctime
+ * rm dir/a: update dir's mtime/ctime
+ * modify dir/a: don't update dir's mtime/ctime
+ *
+ * This can be handled immediately or deferred. Deferring helps reduce the number
+ * of updates when many files in a directory are changed within a brief period.
+ *
+ * If the directory updating is deferred then yaffs_update_dirty_dirs must be
+ * called periodically.
+ */
+
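+/* Deferred-update sketch: with param.defered_dir_update set, creating or
+ * unlinking many entries in one directory just marks the directory dirty and
+ * links it onto dev->dirty_dirs once; a later yaffs_update_dirty_dirs() call
+ * then writes a single object header update for that directory instead of
+ * one per name change.
+ */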
+static void yaffs_update_parent(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+
+ if (!obj)
+ return;
+ dev = obj->my_dev;
+ obj->dirty = 1;
+ yaffs_load_current_time(obj, 0, 1);
+ if (dev->param.defered_dir_update) {
+ struct list_head *link = &obj->variant.dir_variant.dirty;
+
+ if (list_empty(link)) {
+ list_add(link, &dev->dirty_dirs);
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Added object %d to dirty directories",
+ obj->obj_id);
+ }
+
+ } else {
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
+{
+ struct list_head *link;
+ struct yaffs_obj *obj;
+ struct yaffs_dir_var *d_s;
+ union yaffs_obj_var *o_v;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");
+
+ while (!list_empty(&dev->dirty_dirs)) {
+ link = dev->dirty_dirs.next;
+ list_del_init(link);
+
+ d_s = list_entry(link, struct yaffs_dir_var, dirty);
+ o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
+ obj = list_entry(o_v, struct yaffs_obj, variant);
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
+ obj->obj_id);
+
+ if (obj->dirty)
+ yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
+ }
+}
+
+/*
+ * Mknod (create) a new object.
+ * equiv_obj only has meaning for a hard link;
+ * alias_str only has meaning for a symlink.
+ * rdev only has meaning for devices (a subset of special objects)
+ */
+
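+/* Usage sketch (hypothetical caller, POSIX-style mode bits assumed):
+ *
+ *   file = yaffs_create_file(dir, _Y("log.txt"), 0100644, 0, 0);
+ *   link = yaffs_create_symlink(dir, _Y("latest"), 0120777, 0, 0,
+ *                               _Y("log.txt"));
+ *
+ * The wrappers further down pass NULL/0 for equiv_obj and rdev; only hard
+ * links and device specials make use of them.
+ */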
+static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type,
+ struct yaffs_obj *parent,
+ const YCHAR *name,
+ u32 mode,
+ u32 uid,
+ u32 gid,
+ struct yaffs_obj *equiv_obj,
+ const YCHAR *alias_str, u32 rdev)
+{
+ struct yaffs_obj *in;
+ YCHAR *str = NULL;
+ struct yaffs_dev *dev = parent->my_dev;
+
+ /* Check if the entry exists.
+ * If it does then fail the call since we don't want a dup. */
+ if (yaffs_find_by_name(parent, name))
+ return NULL;
+
+ if (type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ str = yaffs_clone_str(alias_str);
+ if (!str)
+ return NULL;
+ }
+
+ in = yaffs_new_obj(dev, -1, type);
+
+ if (!in) {
+ kfree(str);
+ return NULL;
+ }
+
+ in->hdr_chunk = 0;
+ in->valid = 1;
+ in->variant_type = type;
+
+ in->yst_mode = mode;
+
+ yaffs_attribs_init(in, gid, uid, rdev);
+
+ in->n_data_chunks = 0;
+
+ yaffs_set_obj_name(in, name);
+ in->dirty = 1;
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ in->my_dev = parent->my_dev;
+
+ switch (type) {
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.alias = str;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.hardlink_variant.equiv_obj = equiv_obj;
+ in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id;
+ list_add(&in->hard_links, &equiv_obj->hard_links);
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* do nothing */
+ break;
+ }
+
+ if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) {
+ /* Could not create the object header, fail */
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ if (in)
+ yaffs_update_parent(parent);
+
+ return in;
+}
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode,
+ uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name,
+ mode, uid, gid, NULL, NULL, 0);
+}
+
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode,
+ uid, gid, NULL, NULL, rdev);
+}
+
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias)
+{
+ return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode,
+ uid, gid, NULL, alias, 0);
+}
+
+/* yaffs_link_obj returns the object id of the equivalent object.*/
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name,
+ struct yaffs_obj *equiv_obj)
+{
+ /* Get the real object in case we were fed a hard link obj */
+ equiv_obj = yaffs_get_equivalent_obj(equiv_obj);
+
+ if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK,
+ parent, name, 0, 0, 0,
+ equiv_obj, NULL, 0))
+ return equiv_obj;
+
+ return NULL;
+
+}
+
+
+
+/*---------------------- Block Management and Page Allocation -------------*/
+
+static void yaffs_deinit_blocks(struct yaffs_dev *dev)
+{
+ if (dev->block_info_alt && dev->block_info)
+ vfree(dev->block_info);
+ else
+ kfree(dev->block_info);
+
+ dev->block_info_alt = 0;
+
+ dev->block_info = NULL;
+
+ if (dev->chunk_bits_alt && dev->chunk_bits)
+ vfree(dev->chunk_bits);
+ else
+ kfree(dev->chunk_bits);
+ dev->chunk_bits_alt = 0;
+ dev->chunk_bits = NULL;
+}
+
+static int yaffs_init_blocks(struct yaffs_dev *dev)
+{
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ dev->block_info = NULL;
+ dev->chunk_bits = NULL;
+ dev->alloc_block = -1; /* force it to get a new one */
+
+ /* If the first allocation strategy fails, try the alternate one. */
+ dev->block_info =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS);
+ if (!dev->block_info) {
+ dev->block_info =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_info));
+ dev->block_info_alt = 1;
+ } else {
+ dev->block_info_alt = 0;
+ }
+
+ if (!dev->block_info)
+ goto alloc_error;
+
+ /* Set up dynamic blockinfo stuff. Round up bytes. */
+ dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8;
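+ /* e.g. 64 chunks per block -> (64 + 7) / 8 = 8 bytes of chunk-in-use
+ * bits per block. */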
+ dev->chunk_bits =
+ kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS);
+ if (!dev->chunk_bits) {
+ dev->chunk_bits =
+ vmalloc(dev->chunk_bit_stride * n_blocks);
+ dev->chunk_bits_alt = 1;
+ } else {
+ dev->chunk_bits_alt = 0;
+ }
+ if (!dev->chunk_bits)
+ goto alloc_error;
+
+
+ memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info));
+ memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks);
+ return YAFFS_OK;
+
+alloc_error:
+ yaffs_deinit_blocks(dev);
+ return YAFFS_FAIL;
+}
+
+
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
+ int erased_ok = 0;
+ int i;
+
+ /* If the block is still healthy erase it and mark as clean.
+ * If the block has had a data failure, then retire it.
+ */
+
+ yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ "yaffs_block_became_dirty block %d state %d %s",
+ block_no, bi->block_state,
+ (bi->needs_retiring) ? "needs retiring" : "");
+
+ yaffs2_clear_oldest_dirty_seq(dev, bi);
+
+ bi->block_state = YAFFS_BLOCK_STATE_DIRTY;
+
+ /* If this is the block being garbage collected then stop gc'ing */
+ if (block_no == dev->gc_block)
+ dev->gc_block = 0;
+
+ /* If this block is currently the best candidate for gc
+ * then drop as a candidate */
+ if (block_no == dev->gc_dirtiest) {
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ }
+
+ if (!bi->needs_retiring) {
+ yaffs2_checkpt_invalidate(dev);
+ erased_ok = yaffs_erase_block(dev, block_no);
+ if (!erased_ok) {
+ dev->n_erase_failures++;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Erasure failed %d", block_no);
+ }
+ }
+
+ /* Verify erasure if needed */
+ if (erased_ok &&
+ ((yaffs_trace_mask & YAFFS_TRACE_ERASE) ||
+ !yaffs_skip_verification(dev))) {
+ for (i = 0; i < dev->param.chunks_per_block; i++) {
+ if (!yaffs_check_chunk_erased(dev,
+ block_no * dev->param.chunks_per_block + i)) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ ">>Block %d erasure supposedly OK, but chunk %d not erased",
+ block_no, i);
+ }
+ }
+ }
+
+ if (!erased_ok) {
+ /* We lost a block of free space */
+ dev->n_free_chunks -= dev->param.chunks_per_block;
+ yaffs_retire_block(dev, block_no);
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>> Block %d retired", block_no);
+ return;
+ }
+
+ /* Clean it up... */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ bi->seq_number = 0;
+ dev->n_erased_blocks++;
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+ bi->has_shrink_hdr = 0;
+ bi->skip_erased_check = 1; /* Clean, so no need to check */
+ bi->gc_prioritise = 0;
+ bi->has_summary = 0;
+
+ yaffs_clear_chunk_bits(dev, block_no);
+
+ yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no);
+}
+
+static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int old_chunk, u8 *buffer)
+{
+ int new_chunk;
+ int mark_flash = 1;
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj *object;
+ int matching_chunk;
+ int ret_val = YAFFS_OK;
+
+ memset(&tags, 0, sizeof(tags));
+ yaffs_rd_chunk_tags_nand(dev, old_chunk,
+ buffer, &tags);
+ object = yaffs_find_by_number(dev, tags.obj_id);
+
+ yaffs_trace(YAFFS_TRACE_GC_DETAIL,
+ "Collecting chunk in block %d, %d %d %d ",
+ dev->gc_chunk, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+
+ if (object && !yaffs_skip_verification(dev)) {
+ if (tags.chunk_id == 0)
+ matching_chunk =
+ object->hdr_chunk;
+ else if (object->soft_del)
+ /* Defeat the test */
+ matching_chunk = old_chunk;
+ else
+ matching_chunk =
+ yaffs_find_chunk_in_file
+ (object, tags.chunk_id,
+ NULL);
+
+ if (old_chunk != matching_chunk)
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "gc: page in gc mismatch: %d %d %d %d",
+ old_chunk,
+ matching_chunk,
+ tags.obj_id,
+ tags.chunk_id);
+ }
+
+ if (!object) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "page %d in gc has no object: %d %d %d ",
+ old_chunk,
+ tags.obj_id, tags.chunk_id,
+ tags.n_bytes);
+ }
+
+ if (object &&
+ object->deleted &&
+ object->soft_del && tags.chunk_id != 0) {
+ /* Data chunk in a soft deleted file,
+ * throw it away.
+ * It's a soft deleted data chunk;
+ * no need to copy this, just forget
+ * about it and fix up the object.
+ */
+
+ /* Free chunks already includes
+ * softdeleted chunks; however, this
+ * chunk is soon going to be really
+ * deleted which will increment free
+ * chunks. We have to decrement free
+ * chunks so this works out properly.
+ */
+ dev->n_free_chunks--;
+ bi->soft_del_pages--;
+
+ object->n_data_chunks--;
+ if (object->n_data_chunks <= 0) {
+ /* remember to clean up the obj */
+ dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id;
+ dev->n_clean_ups++;
+ }
+ mark_flash = 0;
+ } else if (object) {
+ /* It's either a data chunk in a live
+ * file or an ObjectHeader, so we're
+ * interested in it.
+ * NB Need to keep the ObjectHeaders of
+ * deleted files until the whole file
+ * has been deleted off
+ */
+ tags.serial_number++;
+ dev->n_gc_copies++;
+
+ if (tags.chunk_id == 0) {
+ /* It is an object header;
+ * we need to nuke the
+ * shrink-header flags since its
+ * work is done.
+ * Also need to clean up
+ * shadowing.
+ */
+ struct yaffs_obj_hdr *oh;
+ oh = (struct yaffs_obj_hdr *) buffer;
+
+ oh->is_shrink = 0;
+ tags.extra_is_shrink = 0;
+ oh->shadows_obj = 0;
+ oh->inband_shadowed_obj_id = 0;
+ tags.extra_shadows = 0;
+
+ /* Update file size */
+ if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) {
+ yaffs_oh_size_load(oh,
+ object->variant.file_variant.file_size);
+ tags.extra_file_size =
+ object->variant.file_variant.file_size;
+ }
+
+ yaffs_verify_oh(object, oh, &tags, 1);
+ new_chunk =
+ yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1);
+ } else {
+ new_chunk =
+ yaffs_write_new_chunk(dev, buffer, &tags, 1);
+ }
+
+ if (new_chunk < 0) {
+ ret_val = YAFFS_FAIL;
+ } else {
+
+ /* Now fix up the Tnodes etc. */
+
+ if (tags.chunk_id == 0) {
+ /* It's a header */
+ object->hdr_chunk = new_chunk;
+ object->serial = tags.serial_number;
+ } else {
+ /* It's a data chunk */
+ yaffs_put_chunk_in_file(object, tags.chunk_id,
+ new_chunk, 0);
+ }
+ }
+ }
+ if (ret_val == YAFFS_OK)
+ yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__);
+ return ret_val;
+}
+
+static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block)
+{
+ int old_chunk;
+ int ret_val = YAFFS_OK;
+ int i;
+ int is_checkpt_block;
+ int max_copies;
+ int chunks_before = yaffs_get_erased_chunks(dev);
+ int chunks_after;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, block);
+
+ is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d, in use %d, shrink %d, whole_block %d",
+ block, bi->pages_in_use, bi->has_shrink_hdr,
+ whole_block);
+
+ /*yaffs_verify_free_chunks(dev); */
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ bi->block_state = YAFFS_BLOCK_STATE_COLLECTING;
+
+ bi->has_shrink_hdr = 0; /* clear the flag so that the block can be erased */
+
+ dev->gc_disable = 1;
+
+ yaffs_summary_gc(dev, block);
+
+ if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) {
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "Collecting block %d that has no chunks in use",
+ block);
+ yaffs_block_became_dirty(dev, block);
+ } else {
+
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ yaffs_verify_blk(dev, bi, block);
+
+ max_copies = (whole_block) ? dev->param.chunks_per_block : 5;
+ old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk;
+
+ for (/* init already done */ ;
+ ret_val == YAFFS_OK &&
+ dev->gc_chunk < dev->param.chunks_per_block &&
+ (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) &&
+ max_copies > 0;
+ dev->gc_chunk++, old_chunk++) {
+ if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) {
+ /* Page is in use and might need to be copied */
+ max_copies--;
+ ret_val = yaffs_gc_process_chunk(dev, bi,
+ old_chunk, buffer);
+ }
+ }
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ yaffs_verify_collected_blk(dev, bi, block);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ /*
+ * The gc did not complete. Set block state back to FULL
+ * because checkpointing does not restore gc.
+ */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ } else {
+ /* The gc completed. */
+ /* Do any required cleanups */
+ for (i = 0; i < dev->n_clean_ups; i++) {
+ /* Time to delete the file too */
+ struct yaffs_obj *object =
+ yaffs_find_by_number(dev, dev->gc_cleanup_list[i]);
+ if (object) {
+ yaffs_free_tnode(dev,
+ object->variant.file_variant.top);
+ object->variant.file_variant.top = NULL;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: About to finally delete object %d",
+ object->obj_id);
+ yaffs_generic_obj_del(object);
+ object->my_dev->n_deleted_files--;
+ }
+
+ }
+ chunks_after = yaffs_get_erased_chunks(dev);
+ if (chunks_before >= chunks_after)
+ yaffs_trace(YAFFS_TRACE_GC,
+ "gc did not increase free chunks before %d after %d",
+ chunks_before, chunks_after);
+ dev->gc_block = 0;
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ dev->gc_disable = 0;
+
+ return ret_val;
+}
+
+/*
+ * find_gc_block() selects the dirtiest block (or close enough)
+ * for garbage collection.
+ */
+
+static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
+ int aggressive, int background)
+{
+ int i;
+ int iterations;
+ unsigned selected = 0;
+ int prioritised = 0;
+ int prioritised_exist = 0;
+ struct yaffs_block_info *bi;
+ int threshold;
+
+ /* First let's see if we need to grab a prioritised block */
+ if (dev->has_pending_prioritised_gc && !aggressive) {
+ dev->gc_dirtiest = 0;
+ bi = dev->block_info;
+ for (i = dev->internal_start_block;
+ i <= dev->internal_end_block && !selected; i++) {
+
+ if (bi->gc_prioritise) {
+ prioritised_exist = 1;
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ selected = i;
+ prioritised = 1;
+ }
+ }
+ bi++;
+ }
+
+ /*
+ * If there is a prioritised block and none was selected then
+ * this happened because there is at least one old dirty block
+ * gumming up the works. Let's gc the oldest dirty block.
+ */
+
+ if (prioritised_exist &&
+ !selected && dev->oldest_dirty_block > 0)
+ selected = dev->oldest_dirty_block;
+
+ if (!prioritised_exist) /* None found, so we can clear this */
+ dev->has_pending_prioritised_gc = 0;
+ }
+
+ /* If we're doing aggressive GC then we are happy to take a less-dirty
+ * block, and search harder.
+ * else (leisurely gc) we only bother to do this if the
+ * block has only a few pages in use.
+ */
+
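+ /* Example, assuming 64 chunks per block: an aggressive pass uses
+ * threshold = 64, so any FULL block qualifies.  A background pass
+ * that has already drawn a blank three times computes
+ * threshold = (3 + 2) * 2 = 10, clamped between
+ * YAFFS_GC_PASSIVE_THRESHOLD and 64 / 2 = 32, so only FULL blocks
+ * with at most that many live pages are accepted.
+ */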
+ if (!selected) {
+ int pages_used;
+ int n_blocks =
+ dev->internal_end_block - dev->internal_start_block + 1;
+ if (aggressive) {
+ threshold = dev->param.chunks_per_block;
+ iterations = n_blocks;
+ } else {
+ int max_threshold;
+
+ if (background)
+ max_threshold = dev->param.chunks_per_block / 2;
+ else
+ max_threshold = dev->param.chunks_per_block / 8;
+
+ if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ max_threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+
+ threshold = background ? (dev->gc_not_done + 2) * 2 : 0;
+ if (threshold < YAFFS_GC_PASSIVE_THRESHOLD)
+ threshold = YAFFS_GC_PASSIVE_THRESHOLD;
+ if (threshold > max_threshold)
+ threshold = max_threshold;
+
+ iterations = n_blocks / 16 + 1;
+ if (iterations > 100)
+ iterations = 100;
+ }
+
+ for (i = 0;
+ i < iterations &&
+ (dev->gc_dirtiest < 1 ||
+ dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH);
+ i++) {
+ dev->gc_block_finder++;
+ if (dev->gc_block_finder < dev->internal_start_block ||
+ dev->gc_block_finder > dev->internal_end_block)
+ dev->gc_block_finder =
+ dev->internal_start_block;
+
+ bi = yaffs_get_block_info(dev, dev->gc_block_finder);
+
+ pages_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL &&
+ pages_used < dev->param.chunks_per_block &&
+ (dev->gc_dirtiest < 1 ||
+ pages_used < dev->gc_pages_in_use) &&
+ yaffs_block_ok_for_gc(dev, bi)) {
+ dev->gc_dirtiest = dev->gc_block_finder;
+ dev->gc_pages_in_use = pages_used;
+ }
+ }
+
+ if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold)
+ selected = dev->gc_dirtiest;
+ }
+
+ /*
+ * If nothing has been selected for a while, try the oldest dirty
+ * block because that's gumming up the works.
+ */
+
+ if (!selected && dev->param.is_yaffs2 &&
+ dev->gc_not_done >= (background ? 10 : 20)) {
+ yaffs2_find_oldest_dirty_seq(dev);
+ if (dev->oldest_dirty_block > 0) {
+ selected = dev->oldest_dirty_block;
+ dev->gc_dirtiest = selected;
+ dev->oldest_dirty_gc_count++;
+ bi = yaffs_get_block_info(dev, selected);
+ dev->gc_pages_in_use =
+ bi->pages_in_use - bi->soft_del_pages;
+ } else {
+ dev->gc_not_done = 0;
+ }
+ }
+
+ if (selected) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC Selected block %d with %d free, prioritised:%d",
+ selected,
+ dev->param.chunks_per_block - dev->gc_pages_in_use,
+ prioritised);
+
+ dev->n_gc_blocks++;
+ if (background)
+ dev->bg_gcs++;
+
+ dev->gc_dirtiest = 0;
+ dev->gc_pages_in_use = 0;
+ dev->gc_not_done = 0;
+ if (dev->refresh_skip > 0)
+ dev->refresh_skip--;
+ } else {
+ dev->gc_not_done++;
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s",
+ dev->gc_block_finder, dev->gc_not_done, threshold,
+ dev->gc_dirtiest, dev->gc_pages_in_use,
+ dev->oldest_dirty_block, background ? " bg" : "");
+ }
+
+ return selected;
+}
+
+/* New garbage collector
+ * If we're very low on erased blocks then we do aggressive garbage collection
+ * otherwise we do "leisurely" garbage collection.
+ * Aggressive gc looks further (whole array) and will accept less dirty blocks.
+ * Passive gc only inspects smaller areas and only accepts more dirty blocks.
+ *
+ * The idea is to help clear out space in a more spread-out manner.
+ * Dunno if it really does anything useful.
+ */
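+/* Numeric sketch of the aggressive/passive decision below, assuming
+ * param.n_reserved_blocks = 5 and a checkpoint needing 2 blocks:
+ * min_erased = 5 + 2 + 1 = 8, so with only 6 erased blocks left the pass
+ * goes aggressive; with, say, 20 erased blocks it stays passive, and a
+ * foreground call bails out early once erased space exceeds a quarter of
+ * the free space.
+ */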
+static int yaffs_check_gc(struct yaffs_dev *dev, int background)
+{
+ int aggressive = 0;
+ int gc_ok = YAFFS_OK;
+ int max_tries = 0;
+ int min_erased;
+ int erased_chunks;
+ int checkpt_block_adjust;
+
+ if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0)
+ return YAFFS_OK;
+
+ if (dev->gc_disable)
+ /* Bail out so we don't get recursive gc */
+ return YAFFS_OK;
+
+ /* This loop should pass the first time.
+ * Only loops here if the collection does not increase space.
+ */
+
+ do {
+ max_tries++;
+
+ checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev);
+
+ min_erased =
+ dev->param.n_reserved_blocks + checkpt_block_adjust + 1;
+ erased_chunks =
+ dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ /* If we need a block soon then do aggressive gc. */
+ if (dev->n_erased_blocks < min_erased)
+ aggressive = 1;
+ else {
+ if (!background
+ && erased_chunks > (dev->n_free_chunks / 4))
+ break;
+
+ if (dev->gc_skip > 20)
+ dev->gc_skip = 20;
+ if (erased_chunks < dev->n_free_chunks / 2 ||
+ dev->gc_skip < 1 || background)
+ aggressive = 0;
+ else {
+ dev->gc_skip--;
+ break;
+ }
+ }
+
+ dev->gc_skip = 5;
+
+ /* If we don't already have a block being gc'd then see if we
+ * should start another */
+
+ if (dev->gc_block < 1 && !aggressive) {
+ dev->gc_block = yaffs2_find_refresh_block(dev);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+ if (dev->gc_block < 1) {
+ dev->gc_block =
+ yaffs_find_gc_block(dev, aggressive, background);
+ dev->gc_chunk = 0;
+ dev->n_clean_ups = 0;
+ }
+
+ if (dev->gc_block > 0) {
+ dev->all_gcs++;
+ if (!aggressive)
+ dev->passive_gc_count++;
+
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC n_erased_blocks %d aggressive %d",
+ dev->n_erased_blocks, aggressive);
+
+ gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
+ }
+
+ if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
+ dev->gc_block > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
+ dev->n_erased_blocks, max_tries,
+ dev->gc_block);
+ }
+ } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ (dev->gc_block > 0) && (max_tries < 2));
+
+ return aggressive ? gc_ok : YAFFS_OK;
+}
+
+/*
+ * yaffs_bg_gc()
+ * Garbage collects. Intended to be called from a background thread.
+ * Returns non-zero if at least half the free chunks are erased.
+ */
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency)
+{
+ int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block;
+
+ yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency);
+
+ yaffs_check_gc(dev, 1);
+ return erased_chunks > dev->n_free_chunks / 2;
+}
+
+/*-------------------- Data file manipulation -----------------*/
+
+static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer)
+{
+ int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL);
+
+ if (nand_chunk >= 0)
+ return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk,
+ buffer, NULL);
+ else {
+ yaffs_trace(YAFFS_TRACE_NANDACCESS,
+ "Chunk %d not found zero instead",
+ nand_chunk);
+ /* get sane (zero) data if you read a hole */
+ memset(buffer, 0, in->my_dev->data_bytes_per_chunk);
+ return 0;
+ }
+
+}
+
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn)
+{
+ int block;
+ int page;
+ struct yaffs_ext_tags tags;
+ struct yaffs_block_info *bi;
+
+ if (chunk_id <= 0)
+ return;
+
+ dev->n_deletions++;
+ block = chunk_id / dev->param.chunks_per_block;
+ page = chunk_id % dev->param.chunks_per_block;
+
+ if (!yaffs_check_chunk_bit(dev, block, page))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Deleting invalid chunk %d", chunk_id);
+
+ bi = yaffs_get_block_info(dev, block);
+
+ yaffs2_update_oldest_dirty_seq(dev, block, bi);
+
+ yaffs_trace(YAFFS_TRACE_DELETION,
+ "line %d delete of chunk %d",
+ lyn, chunk_id);
+
+ if (!dev->param.is_yaffs2 && mark_flash &&
+ bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) {
+
+ memset(&tags, 0, sizeof(tags));
+ tags.is_deleted = 1;
+ yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags);
+ yaffs_handle_chunk_update(dev, chunk_id, &tags);
+ } else {
+ dev->n_unmarked_deletions++;
+ }
+
+ /* Pull out of the management area.
+ * If the whole block became dirty, this will kick off an erasure.
+ */
+ if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING ||
+ bi->block_state == YAFFS_BLOCK_STATE_FULL ||
+ bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) {
+ dev->n_free_chunks++;
+ yaffs_clear_chunk_bit(dev, block, page);
+ bi->pages_in_use--;
+
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING &&
+ bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ yaffs_block_became_dirty(dev, block);
+ }
+ }
+}
+
+static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
+ const u8 *buffer, int n_bytes, int use_reserve)
+{
+ /* Find the old chunk. Need to do this to get the serial number.
+ * Write the new one and patch it into the tree.
+ * Invalidate the old tags.
+ */
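+ /* In code order below: yaffs_find_chunk_in_file() fetches the old
+ * chunk and its tags (giving the serial number),
+ * yaffs_write_new_chunk() writes the replacement with
+ * serial_number + 1, yaffs_put_chunk_in_file() repoints the tnode
+ * tree, and yaffs_chunk_del() retires the old chunk.
+ */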
+
+ int prev_chunk_id;
+ struct yaffs_ext_tags prev_tags;
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_dev *dev = in->my_dev;
+
+ yaffs_check_gc(dev, 0);
+
+ /* Get the previous chunk at this location in the file if it exists.
+ * If it does not exist then put a zero into the tree. This creates
+ * the tnode now, rather than later when it is harder to clean up.
+ */
+ prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags);
+ if (prev_chunk_id < 1 &&
+ !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0))
+ return 0;
+
+ /* Set up new tags */
+ memset(&new_tags, 0, sizeof(new_tags));
+
+ new_tags.chunk_id = inode_chunk;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number =
+ (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1;
+ new_tags.n_bytes = n_bytes;
+
+ if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Writing %d bytes to chunk!!!!!!!!!",
+ n_bytes);
+ BUG();
+ }
+
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve);
+
+ if (new_chunk_id > 0) {
+ yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0);
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ yaffs_verify_file_sane(in);
+ }
+ return new_chunk_id;
+
+}
+
+
+
+static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set,
+ const YCHAR *name, const void *value, int size,
+ int flags)
+{
+ struct yaffs_xattr_mod xmod;
+ int result;
+
+ xmod.set = set;
+ xmod.name = name;
+ xmod.data = value;
+ xmod.size = size;
+ xmod.flags = flags;
+ xmod.result = -ENOSPC;
+
+ result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod);
+
+ if (result > 0)
+ return xmod.result;
+ else
+ return -ENOSPC;
+}
+
+static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer,
+ struct yaffs_xattr_mod *xmod)
+{
+ int retval = 0;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+ char *x_buffer = buffer + x_offs;
+
+ if (xmod->set)
+ retval =
+ nval_set(x_buffer, x_size, xmod->name, xmod->data,
+ xmod->size, xmod->flags);
+ else
+ retval = nval_del(x_buffer, x_size, xmod->name);
+
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ xmod->result = retval;
+
+ return retval;
+}
+
+static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name,
+ void *value, int size)
+{
+ char *buffer = NULL;
+ int result;
+ struct yaffs_ext_tags tags;
+ struct yaffs_dev *dev = obj->my_dev;
+ int x_offs = sizeof(struct yaffs_obj_hdr);
+ int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr);
+ char *x_buffer;
+ int retval = 0;
+
+ if (obj->hdr_chunk < 1)
+ return -ENODATA;
+
+ /* If we know that the object has no xattribs then don't do all the
+ * reading and parsing.
+ */
+ if (obj->xattr_known && !obj->has_xattr) {
+ if (name)
+ return -ENODATA;
+ else
+ return 0;
+ }
+
+ buffer = (char *)yaffs_get_temp_buffer(dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ result =
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags);
+
+ if (result != YAFFS_OK)
+ retval = -ENOENT;
+ else {
+ x_buffer = buffer + x_offs;
+
+ if (!obj->xattr_known) {
+ obj->has_xattr = nval_hasvalues(x_buffer, x_size);
+ obj->xattr_known = 1;
+ }
+
+ if (name)
+ retval = nval_get(x_buffer, x_size, name, value, size);
+ else
+ retval = nval_list(x_buffer, x_size, value, size);
+ }
+ yaffs_release_temp_buffer(dev, (u8 *) buffer);
+ return retval;
+}
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name,
+ const void *value, int size, int flags)
+{
+ return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags);
+}
+
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name)
+{
+ return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0);
+}
+
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value,
+ int size)
+{
+ return yaffs_do_xattrib_fetch(obj, name, value, size);
+}
+
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size)
+{
+ return yaffs_do_xattrib_fetch(obj, NULL, buffer, size);
+}
+
+static void yaffs_check_obj_details_loaded(struct yaffs_obj *in)
+{
+ u8 *buf;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+
+ if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
+ return;
+
+ dev = in->my_dev;
+ in->lazy_loaded = 0;
+ buf = yaffs_get_temp_buffer(dev);
+
+ yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
+ oh = (struct yaffs_obj_hdr *)buf;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ yaffs_set_obj_name_from_oh(in, oh);
+
+ if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ in->variant.symlink_variant.alias =
+ yaffs_clone_str(oh->alias);
+ }
+ yaffs_release_temp_buffer(dev, buf);
+}
+
+static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name,
+ const YCHAR *oh_name, int buff_size)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ if (dev->param.auto_unicode) {
+ if (*oh_name) {
+ /* It is an ASCII name, do an ASCII to
+ * unicode conversion */
+ const char *ascii_oh_name = (const char *)oh_name;
+ int n = buff_size - 1;
+ while (n > 0 && *ascii_oh_name) {
+ *name = *ascii_oh_name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ yaffs_strncpy(name, oh_name + 1, buff_size - 1);
+ }
+ } else {
+#else
+ dev = dev;
+ {
+#endif
+ yaffs_strncpy(name, oh_name, buff_size - 1);
+ }
+}
+
+static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name,
+ const YCHAR *name)
+{
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+
+ int is_ascii;
+ YCHAR *w;
+
+ if (dev->param.auto_unicode) {
+
+ is_ascii = 1;
+ w = name;
+
+ /* Figure out if the name will fit in ascii character set */
+ while (is_ascii && *w) {
+ if ((*w) & 0xff00)
+ is_ascii = 0;
+ w++;
+ }
+
+ if (is_ascii) {
+ /* It is an ASCII name, so convert unicode to ascii */
+ char *ascii_oh_name = (char *)oh_name;
+ int n = YAFFS_MAX_NAME_LENGTH - 1;
+ while (n > 0 && *name) {
+ *ascii_oh_name = *name;
+ name++;
+ ascii_oh_name++;
+ n--;
+ }
+ } else {
+ /* Unicode name, so save starting at the second YCHAR */
+ *oh_name = 0;
+ yaffs_strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2);
+ }
+ } else {
+#else
+ dev = dev;
+ {
+#endif
+ yaffs_strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1);
+ }
+}
+
+/* UpdateObjectHeader updates the header on NAND for an object.
+ * If name is not NULL, then that new name is used.
+ */
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force,
+ int is_shrink, int shadows, struct yaffs_xattr_mod *xmod)
+{
+
+ struct yaffs_block_info *bi;
+ struct yaffs_dev *dev = in->my_dev;
+ int prev_chunk_id;
+ int ret_val = 0;
+ int new_chunk_id;
+ struct yaffs_ext_tags new_tags;
+ struct yaffs_ext_tags old_tags;
+ const YCHAR *alias = NULL;
+ u8 *buffer = NULL;
+ YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1];
+ struct yaffs_obj_hdr *oh = NULL;
+ loff_t file_size = 0;
+
+ yaffs_strcpy(old_name, _Y("silly old name"));
+
+ if (in->fake && in != dev->root_dir && !force && !xmod)
+ return ret_val;
+
+ yaffs_check_gc(dev, 0);
+ yaffs_check_obj_details_loaded(in);
+
+ buffer = yaffs_get_temp_buffer(in->my_dev);
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ prev_chunk_id = in->hdr_chunk;
+
+ if (prev_chunk_id > 0) {
+ yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ buffer, &old_tags);
+
+ yaffs_verify_oh(in, oh, &old_tags, 0);
+ memcpy(old_name, oh->name, sizeof(oh->name));
+ memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr));
+ } else {
+ memset(buffer, 0xff, dev->data_bytes_per_chunk);
+ }
+
+ oh->type = in->variant_type;
+ oh->yst_mode = in->yst_mode;
+ oh->shadows_obj = oh->inband_shadowed_obj_id = shadows;
+
+ yaffs_load_attribs_oh(oh, in);
+
+ if (in->parent)
+ oh->parent_obj_id = in->parent->obj_id;
+ else
+ oh->parent_obj_id = 0;
+
+ if (name && *name) {
+ memset(oh->name, 0, sizeof(oh->name));
+ yaffs_load_oh_from_name(dev, oh->name, name);
+ } else if (prev_chunk_id > 0) {
+ memcpy(oh->name, old_name, sizeof(oh->name));
+ } else {
+ memset(oh->name, 0, sizeof(oh->name));
+ }
+
+ oh->is_shrink = is_shrink;
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Should not happen */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED &&
+ oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED)
+ file_size = in->variant.file_variant.file_size;
+ yaffs_oh_size_load(oh, file_size);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ oh->equiv_id = in->variant.hardlink_variant.equiv_id;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ alias = in->variant.symlink_variant.alias;
+ if (!alias)
+ alias = _Y("no alias");
+ yaffs_strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH);
+ oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0;
+ break;
+ }
+
+ /* process any xattrib modifications */
+ if (xmod)
+ yaffs_apply_xattrib_mod(in, (char *)buffer, xmod);
+
+ /* Tags */
+ memset(&new_tags, 0, sizeof(new_tags));
+ in->serial++;
+ new_tags.chunk_id = 0;
+ new_tags.obj_id = in->obj_id;
+ new_tags.serial_number = in->serial;
+
+ /* Add extra info for file header */
+ new_tags.extra_available = 1;
+ new_tags.extra_parent_id = oh->parent_obj_id;
+ new_tags.extra_file_size = file_size;
+ new_tags.extra_is_shrink = oh->is_shrink;
+ new_tags.extra_equiv_id = oh->equiv_id;
+ new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0;
+ new_tags.extra_obj_type = in->variant_type;
+ yaffs_verify_oh(in, oh, &new_tags, 1);
+
+ /* Create new chunk in NAND */
+ new_chunk_id =
+ yaffs_write_new_chunk(dev, buffer, &new_tags,
+ (prev_chunk_id > 0) ? 1 : 0);
+
+ if (buffer)
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (new_chunk_id < 0)
+ return new_chunk_id;
+
+ in->hdr_chunk = new_chunk_id;
+
+ if (prev_chunk_id > 0)
+ yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__);
+
+ if (!yaffs_obj_cache_dirty(in))
+ in->dirty = 0;
+
+ /* If this was a shrink, then mark the block
+ * that the chunk lives on */
+ if (is_shrink) {
+ bi = yaffs_get_block_info(in->my_dev,
+ new_chunk_id /
+ in->my_dev->param.chunks_per_block);
+ bi->has_shrink_hdr = 1;
+ }
+
+
+ return new_chunk_id;
+}
+
+/*--------------------- File read/write ------------------------
+ * Read and write have very similar structures.
+ * In general the read/write has three parts to it
+ * An incomplete chunk to start with (if the read/write is not chunk-aligned)
+ * Some complete chunks
+ * An incomplete chunk to end off with
+ *
+ * Curve-balls: the first chunk might also be the last chunk.
+ */
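+/* Worked example (illustrative, assuming 2048-byte data chunks): a 5000-byte
+ * read starting at offset 1000 splits into a 1048-byte tail of the first
+ * chunk, one complete 2048-byte chunk, and a 1904-byte head of the last chunk.
+ */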
+
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+{
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ struct yaffs_cache *cache;
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+ chunk++;
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+ if ((start + n) < dev->data_bytes_per_chunk)
+ n_copy = n;
+ else
+ n_copy = dev->data_bytes_per_chunk - start;
+
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ /* If the chunk is already in the cache or it is less than
+ * a whole chunk or we're using inband tags then use the cache
+ * (if there is caching) else bypass the cache.
+ */
+ if (cache || n_copy != dev->data_bytes_per_chunk ||
+ dev->param.inband_tags) {
+ if (dev->param.n_caches > 0) {
+
+ /* If we can't find the data in the cache,
+ * then load it up. */
+
+ if (!cache) {
+ cache =
+ yaffs_grab_chunk_cache(in->my_dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ cache->n_bytes = 0;
+ }
+
+ yaffs_use_cache(dev, cache, 0);
+
+ cache->locked = 1;
+
+ memcpy(buffer, &cache->data[start], n_copy);
+
+ cache->locked = 0;
+ } else {
+ /* Read into the local buffer then copy.. */
+
+ u8 *local_buffer =
+ yaffs_get_temp_buffer(dev);
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+
+ memcpy(buffer, &local_buffer[start], n_copy);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+ } else {
+ /* A full chunk. Read directly into the buffer. */
+ yaffs_rd_data_obj(in, chunk, buffer);
+ }
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+ return n_done;
+}
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+
+ int chunk;
+ u32 start;
+ int n_copy;
+ int n = n_bytes;
+ int n_done = 0;
+ int n_writeback;
+ loff_t start_write = offset;
+ int chunk_written = 0;
+ u32 n_bytes_read;
+ loff_t chunk_start;
+ struct yaffs_dev *dev;
+
+ dev = in->my_dev;
+
+ while (n > 0 && chunk_written >= 0) {
+ yaffs_addr_to_chunk(dev, offset, &chunk, &start);
+
+ if (((loff_t)chunk) *
+ dev->data_bytes_per_chunk + start != offset ||
+ start >= dev->data_bytes_per_chunk) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "AddrToChunk of offset %lld gives chunk %d start %d",
+ offset, chunk, start);
+ }
+ chunk++; /* File pos to chunk in file offset */
+
+ /* OK now check for the curveball where the start and end are in
+ * the same chunk.
+ */
+
+ if ((start + n) < dev->data_bytes_per_chunk) {
+ n_copy = n;
+
+ /* Now calculate how many bytes to write back....
+			 * If we're overwriting and not writing to the end of
+ * file then we need to write back as much as was there
+ * before.
+ */
+
+ chunk_start = (((loff_t)(chunk - 1)) *
+ dev->data_bytes_per_chunk);
+
+ if (chunk_start > in->variant.file_variant.file_size)
+ n_bytes_read = 0; /* Past end of file */
+ else
+ n_bytes_read =
+ in->variant.file_variant.file_size -
+ chunk_start;
+
+ if (n_bytes_read > dev->data_bytes_per_chunk)
+ n_bytes_read = dev->data_bytes_per_chunk;
+
+ n_writeback =
+ (n_bytes_read >
+ (start + n)) ? n_bytes_read : (start + n);
+
+ if (n_writeback < 0 ||
+ n_writeback > dev->data_bytes_per_chunk)
+ BUG();
+
+ } else {
+ n_copy = dev->data_bytes_per_chunk - start;
+ n_writeback = dev->data_bytes_per_chunk;
+ }
+
+ if (n_copy != dev->data_bytes_per_chunk ||
+ dev->param.inband_tags) {
+ /* An incomplete start or end chunk (or maybe both
+ * start and end chunk), or we're using inband tags,
+ * so we want to use the cache buffers.
+ */
+ if (dev->param.n_caches > 0) {
+ struct yaffs_cache *cache;
+
+ /* If we can't find the data in the cache, then
+ * load the cache */
+ cache = yaffs_find_chunk_cache(in, chunk);
+
+ if (!cache &&
+ yaffs_check_alloc_available(dev, 1)) {
+ cache = yaffs_grab_chunk_cache(dev);
+ cache->object = in;
+ cache->chunk_id = chunk;
+ cache->dirty = 0;
+ cache->locked = 0;
+ yaffs_rd_data_obj(in, chunk,
+ cache->data);
+ } else if (cache &&
+ !cache->dirty &&
+ !yaffs_check_alloc_available(dev,
+ 1)) {
+ /* Drop the cache if it was a read cache
+ * item and no space check has been made
+ * for it.
+ */
+ cache = NULL;
+ }
+
+ if (cache) {
+ yaffs_use_cache(dev, cache, 1);
+ cache->locked = 1;
+
+ memcpy(&cache->data[start], buffer,
+ n_copy);
+
+ cache->locked = 0;
+ cache->n_bytes = n_writeback;
+
+ if (write_through) {
+ chunk_written =
+ yaffs_wr_data_obj
+ (cache->object,
+ cache->chunk_id,
+ cache->data,
+ cache->n_bytes, 1);
+ cache->dirty = 0;
+ }
+ } else {
+ chunk_written = -1; /* fail write */
+ }
+ } else {
+ /* An incomplete start or end chunk (or maybe
+ * both start and end chunk). Read into the
+ * local buffer then copy over and write back.
+ */
+
+ u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+ yaffs_rd_data_obj(in, chunk, local_buffer);
+ memcpy(&local_buffer[start], buffer, n_copy);
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk,
+ local_buffer,
+ n_writeback, 0);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+ } else {
+ /* A full chunk. Write directly from the buffer. */
+
+ chunk_written =
+ yaffs_wr_data_obj(in, chunk, buffer,
+ dev->data_bytes_per_chunk, 0);
+
+ /* Since we've overwritten the cached data,
+ * we better invalidate it. */
+ yaffs_invalidate_chunk_cache(in, chunk);
+ }
+
+ if (chunk_written >= 0) {
+ n -= n_copy;
+ offset += n_copy;
+ buffer += n_copy;
+ n_done += n_copy;
+ }
+ }
+
+ /* Update file object */
+
+ if ((start_write + n_done) > in->variant.file_variant.file_size)
+ in->variant.file_variant.file_size = (start_write + n_done);
+
+ in->dirty = 1;
+ return n_done;
+}
+
+int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_through)
+{
+ yaffs2_handle_hole(in, offset);
+ return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
+}
+
+/* ---------------------- File resizing stuff ------------------ */
+
+static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size)
+{
+
+ struct yaffs_dev *dev = in->my_dev;
+ loff_t old_size = in->variant.file_variant.file_size;
+ int i;
+ int chunk_id;
+ u32 dummy;
+ int last_del;
+ int start_del;
+
+ if (old_size > 0)
+ yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
+ else
+ last_del = 0;
+
+ yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1,
+ &start_del, &dummy);
+ last_del++;
+ start_del++;
+
+ /* Delete backwards so that we don't end up with holes if
+ * power is lost part-way through the operation.
+ */
+ for (i = last_del; i >= start_del; i--) {
+ /* NB this could be optimised somewhat,
+ * eg. could retrieve the tags and write them without
+ * using yaffs_chunk_del
+ */
+
+ chunk_id = yaffs_find_del_file_chunk(in, i, NULL);
+
+ if (chunk_id < 1)
+ continue;
+
+ if (chunk_id <
+ (dev->internal_start_block * dev->param.chunks_per_block) ||
+ chunk_id >=
+ ((dev->internal_end_block + 1) *
+ dev->param.chunks_per_block)) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Found daft chunk_id %d for %d",
+ chunk_id, i);
+ } else {
+ in->n_data_chunks--;
+ yaffs_chunk_del(dev, chunk_id, 1, __LINE__);
+ }
+ }
+}
+
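+/* Worked example (illustrative, assuming 2048-byte data chunks): resizing a
+ * file down to 5000 bytes gives new_full = 2 and new_partial = 904, so chunks
+ * from in-file chunk 4 upward are pruned and chunk 3 is rewritten with its
+ * first 904 bytes kept and the remainder zero padded.
+ */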
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
+{
+ int new_full;
+ u32 new_partial;
+ struct yaffs_dev *dev = obj->my_dev;
+
+ yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial);
+
+ yaffs_prune_chunks(obj, new_size);
+
+ if (new_partial != 0) {
+ int last_chunk = 1 + new_full;
+ u8 *local_buffer = yaffs_get_temp_buffer(dev);
+
+ /* Rewrite the last chunk with its new size and zero pad */
+ yaffs_rd_data_obj(obj, last_chunk, local_buffer);
+ memset(local_buffer + new_partial, 0,
+ dev->data_bytes_per_chunk - new_partial);
+
+ yaffs_wr_data_obj(obj, last_chunk, local_buffer,
+ new_partial, 1);
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+ }
+
+ obj->variant.file_variant.file_size = new_size;
+
+ yaffs_prune_tree(dev, &obj->variant.file_variant);
+}
+
+int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size)
+{
+ struct yaffs_dev *dev = in->my_dev;
+ loff_t old_size = in->variant.file_variant.file_size;
+
+ yaffs_flush_file_cache(in);
+ yaffs_invalidate_whole_cache(in);
+
+ yaffs_check_gc(dev, 0);
+
+ if (in->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ if (new_size == old_size)
+ return YAFFS_OK;
+
+ if (new_size > old_size) {
+ yaffs2_handle_hole(in, new_size);
+ in->variant.file_variant.file_size = new_size;
+ } else {
+ /* new_size < old_size */
+ yaffs_resize_file_down(in, new_size);
+ }
+
+	/* Write a new object header to reflect the resize and
+	 * show that we've shrunk the file, if need be.
+ * Do this only if the file is not in the deleted directories
+ * and is not shadowed.
+ */
+ if (in->parent &&
+ !in->is_shadowed &&
+ in->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ in->parent->obj_id != YAFFS_OBJECTID_DELETED)
+ yaffs_update_oh(in, NULL, 0, 0, 0, NULL);
+
+ return YAFFS_OK;
+}
+
+int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync)
+{
+ if (!in->dirty)
+ return YAFFS_OK;
+
+ yaffs_flush_file_cache(in);
+
+ if (data_sync)
+ return YAFFS_OK;
+
+ if (update_time)
+ yaffs_load_current_time(in, 0, 0);
+
+ return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ?
+ YAFFS_OK : YAFFS_FAIL;
+}
+
+
+/* yaffs_del_file deletes the whole file data
+ * and the inode associated with the file.
+ * It does not delete the links associated with the file.
+ */
+static int yaffs_unlink_file_if_needed(struct yaffs_obj *in)
+{
+ int ret_val;
+ int del_now = 0;
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (!in->my_inode)
+ del_now = 1;
+
+ if (del_now) {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->del_dir,
+ _Y("deleted"), 0, 0);
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: immediate deletion of file %d",
+ in->obj_id);
+ in->deleted = 1;
+ in->my_dev->n_deleted_files++;
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+ yaffs_soft_del_file(in);
+ } else {
+ ret_val =
+ yaffs_change_obj_name(in, in->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+ return ret_val;
+}
+
+int yaffs_del_file(struct yaffs_obj *in)
+{
+ int ret_val = YAFFS_OK;
+ int deleted; /* Need to cache value on stack if in is freed */
+ struct yaffs_dev *dev = in->my_dev;
+
+ if (dev->param.disable_soft_del || dev->param.is_yaffs2)
+ yaffs_resize_file(in, 0);
+
+ if (in->n_data_chunks > 0) {
+ /* Use soft deletion if there is data in the file.
+ * That won't be the case if it has been resized to zero.
+ */
+ if (!in->unlinked)
+ ret_val = yaffs_unlink_file_if_needed(in);
+
+ deleted = in->deleted;
+
+ if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) {
+ in->deleted = 1;
+ deleted = 1;
+ in->my_dev->n_deleted_files++;
+ yaffs_soft_del_file(in);
+ }
+ return deleted ? YAFFS_OK : YAFFS_FAIL;
+ } else {
+ /* The file has no data chunks so we toss it immediately */
+ yaffs_free_tnode(in->my_dev, in->variant.file_variant.top);
+ in->variant.file_variant.top = NULL;
+ yaffs_generic_obj_del(in);
+
+ return YAFFS_OK;
+ }
+}
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj)
+{
+ return (obj &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) &&
+ !(list_empty(&obj->variant.dir_variant.children));
+}
+
+static int yaffs_del_dir(struct yaffs_obj *obj)
+{
+ /* First check that the directory is empty. */
+ if (yaffs_is_non_empty_dir(obj))
+ return YAFFS_FAIL;
+
+ return yaffs_generic_obj_del(obj);
+}
+
+static int yaffs_del_symlink(struct yaffs_obj *in)
+{
+ kfree(in->variant.symlink_variant.alias);
+ in->variant.symlink_variant.alias = NULL;
+
+ return yaffs_generic_obj_del(in);
+}
+
+static int yaffs_del_link(struct yaffs_obj *in)
+{
+ /* remove this hardlink from the list associated with the equivalent
+ * object
+ */
+ list_del_init(&in->hard_links);
+ return yaffs_generic_obj_del(in);
+}
+
+int yaffs_del_obj(struct yaffs_obj *obj)
+{
+ int ret_val = -1;
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ ret_val = yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ if (!list_empty(&obj->variant.dir_variant.dirty)) {
+ yaffs_trace(YAFFS_TRACE_BACKGROUND,
+ "Remove object %d from dirty directories",
+ obj->obj_id);
+ list_del_init(&obj->variant.dir_variant.dirty);
+ }
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ ret_val = yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ ret_val = yaffs_del_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ ret_val = yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ ret_val = 0;
+ break; /* should not happen. */
+ }
+ return ret_val;
+}
+
+static int yaffs_unlink_worker(struct yaffs_obj *obj)
+{
+ int del_now = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (!obj->my_inode)
+ del_now = 1;
+
+ yaffs_update_parent(obj->parent);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ return yaffs_del_link(obj);
+ } else if (!list_empty(&obj->hard_links)) {
+ /* Curve ball: We're unlinking an object that has a hardlink.
+ *
+ * This problem arises because we are not strictly following
+		 * the Linux link/inode model.
+ *
+ * We can't really delete the object.
+ * Instead, we do the following:
+ * - Select a hardlink.
+ * - Unhook it from the hard links
+ * - Move it from its parent directory so that the rename works.
+ * - Rename the object to the hardlink's name.
+ * - Delete the hardlink
+ */
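+		/* Illustrative example: if "a" is unlinked while hardlink "b"
+		 * refers to the same object, the object is renamed to "b" in
+		 * b's directory and the now-redundant hardlink object is
+		 * deleted, so the data stays reachable as "b".
+		 */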
+
+ struct yaffs_obj *hl;
+ struct yaffs_obj *parent;
+ int ret_val;
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ hl = list_entry(obj->hard_links.next, struct yaffs_obj,
+ hard_links);
+
+ yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1);
+ parent = hl->parent;
+
+ list_del_init(&hl->hard_links);
+
+ yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl);
+
+ ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0);
+
+ if (ret_val == YAFFS_OK)
+ ret_val = yaffs_generic_obj_del(hl);
+
+ return ret_val;
+
+ } else if (del_now) {
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return yaffs_del_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ list_del_init(&obj->variant.dir_variant.dirty);
+ return yaffs_del_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return yaffs_del_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ return yaffs_generic_obj_del(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ return YAFFS_FAIL;
+ }
+ } else if (yaffs_is_non_empty_dir(obj)) {
+ return YAFFS_FAIL;
+ } else {
+ return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir,
+ _Y("unlinked"), 0, 0);
+ }
+}
+
+static int yaffs_unlink_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->unlink_allowed)
+ return yaffs_unlink_worker(obj);
+
+ return YAFFS_FAIL;
+}
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name)
+{
+ struct yaffs_obj *obj;
+
+ obj = yaffs_find_by_name(dir, name);
+ return yaffs_unlink_obj(obj);
+}
+
+/* Note:
+ * If old_name is NULL then we take old_dir as the object to be renamed.
+ */
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name,
+ struct yaffs_obj *new_dir, const YCHAR *new_name)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *existing_target = NULL;
+ int force = 0;
+ int result;
+ struct yaffs_dev *dev;
+
+ if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+ if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ dev = old_dir->my_dev;
+
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+	/* Special case for case-insensitive systems.
+ * While look-up is case insensitive, the name isn't.
+ * Therefore we might want to change x.txt to X.txt
+ */
+ if (old_dir == new_dir &&
+ old_name && new_name &&
+ yaffs_strcmp(old_name, new_name) == 0)
+ force = 1;
+#endif
+
+ if (yaffs_strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) >
+ YAFFS_MAX_NAME_LENGTH)
+ /* ENAMETOOLONG */
+ return YAFFS_FAIL;
+
+ if (old_name)
+ obj = yaffs_find_by_name(old_dir, old_name);
+	else {
+ obj = old_dir;
+ old_dir = obj->parent;
+ }
+
+ if (obj && obj->rename_allowed) {
+ /* Now handle an existing target, if there is one */
+ existing_target = yaffs_find_by_name(new_dir, new_name);
+ if (yaffs_is_non_empty_dir(existing_target)) {
+ return YAFFS_FAIL; /* ENOTEMPTY */
+ } else if (existing_target && existing_target != obj) {
+ /* Nuke the target first, using shadowing,
+ * but only if it isn't the same object.
+ *
+ * Note we must disable gc here otherwise it can mess
+ * up the shadowing.
+ *
+ */
+ dev->gc_disable = 1;
+ yaffs_change_obj_name(obj, new_dir, new_name, force,
+ existing_target->obj_id);
+ existing_target->is_shadowed = 1;
+ yaffs_unlink_obj(existing_target);
+ dev->gc_disable = 0;
+ }
+
+ result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0);
+
+ yaffs_update_parent(old_dir);
+ if (new_dir != old_dir)
+ yaffs_update_parent(new_dir);
+
+ return result;
+ }
+ return YAFFS_FAIL;
+}
+
+/*----------------------- Initialisation Scanning ---------------------- */
+
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning)
+{
+ struct yaffs_obj *obj;
+
+ if (backward_scanning) {
+ /* Handle YAFFS2 case (backward scanning)
+ * If the shadowed object exists then ignore.
+ */
+ obj = yaffs_find_by_number(dev, obj_id);
+ if (obj)
+ return;
+ }
+
+ /* Let's create it (if it does not exist) assuming it is a file so that
+ * it can do shrinking etc.
+	 * We put it in the unlinked dir to be cleaned up after the scanning.
+ */
+ obj =
+ yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE);
+ if (!obj)
+ return;
+ obj->is_shadowed = 1;
+ yaffs_add_obj_to_dir(dev->unlinked_dir, obj);
+ obj->variant.file_variant.shrink_size = 0;
+ obj->valid = 1; /* So that we don't read any other info. */
+}
+
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list)
+{
+ struct list_head *lh;
+ struct list_head *save;
+ struct yaffs_obj *hl;
+ struct yaffs_obj *in;
+
+ list_for_each_safe(lh, save, hard_list) {
+ hl = list_entry(lh, struct yaffs_obj, hard_links);
+ in = yaffs_find_by_number(dev,
+ hl->variant.hardlink_variant.equiv_id);
+
+ if (in) {
+ /* Add the hardlink pointers */
+ hl->variant.hardlink_variant.equiv_obj = in;
+ list_add(&hl->hard_links, &in->hard_links);
+ } else {
+			/* TODO: Need to report/handle this better.
+			 * Got a problem... hardlink to a non-existent object.
+ */
+ hl->variant.hardlink_variant.equiv_obj = NULL;
+ INIT_LIST_HEAD(&hl->hard_links);
+ }
+ }
+}
+
+static void yaffs_strip_deleted_objs(struct yaffs_dev *dev)
+{
+ /*
+ * Sort out state of unlinked and deleted objects after scanning.
+ */
+ struct list_head *i;
+ struct list_head *n;
+ struct yaffs_obj *l;
+
+ if (dev->read_only)
+ return;
+
+ /* Soft delete all the unlinked files */
+ list_for_each_safe(i, n,
+ &dev->unlinked_dir->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+
+ list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+ yaffs_del_obj(l);
+ }
+}
+
+/*
+ * This code iterates through all the objects making sure that they are rooted.
+ * Any unrooted objects are re-rooted in lost+found.
+ * An object needs to be in one of:
+ * - Directly under deleted, unlinked
+ * - Directly or indirectly under root.
+ *
+ * Note:
+ * This code assumes that we don't ever change the current relationships
+ * between directories:
+ * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL
+ * lost-n-found->parent == root_dir
+ *
+ * This fixes the problem where directories might have inadvertently been
+ * deleted leaving the object "hanging" without being rooted in the
+ * directory tree.
+ */
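+/* Illustrative example: if a directory was inadvertently deleted while a
+ * child object survived, the child's parent chain no longer reaches root_dir,
+ * so the loop below detects it as hanging and moves it to lost+found.
+ */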
+
+static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj)
+{
+ return (obj == dev->del_dir ||
+ obj == dev->unlinked_dir || obj == dev->root_dir);
+}
+
+static void yaffs_fix_hanging_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *parent;
+ int i;
+ struct list_head *lh;
+ struct list_head *n;
+ int depth_limit;
+ int hanging;
+
+ if (dev->read_only)
+ return;
+
+ /* Iterate through the objects in each hash entry,
+ * looking at each object.
+ * Make sure it is rooted.
+ */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each_safe(lh, n, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ parent = obj->parent;
+
+ if (yaffs_has_null_parent(dev, obj)) {
+ /* These directories are not hanging */
+ hanging = 0;
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ hanging = 1;
+ } else if (yaffs_has_null_parent(dev, parent)) {
+ hanging = 0;
+ } else {
+ /*
+ * Need to follow the parent chain to
+ * see if it is hanging.
+ */
+ hanging = 0;
+ depth_limit = 100;
+
+ while (parent != dev->root_dir &&
+ parent->parent &&
+ parent->parent->variant_type ==
+ YAFFS_OBJECT_TYPE_DIRECTORY &&
+ depth_limit > 0) {
+ parent = parent->parent;
+ depth_limit--;
+ }
+ if (parent != dev->root_dir)
+ hanging = 1;
+ }
+ if (hanging) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Hanging object %d moved to lost and found",
+ obj->obj_id);
+ yaffs_add_obj_to_dir(dev->lost_n_found, obj);
+ }
+ }
+ }
+}
+
+/*
+ * Delete directory contents for cleaning up lost and found.
+ */
+static void yaffs_del_dir_contents(struct yaffs_obj *dir)
+{
+ struct yaffs_obj *obj;
+ struct list_head *lh;
+ struct list_head *n;
+
+ if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ BUG();
+
+ list_for_each_safe(lh, n, &dir->variant.dir_variant.children) {
+ obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffs_del_dir_contents(obj);
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Deleting lost_found object %d",
+ obj->obj_id);
+ yaffs_unlink_obj(obj);
+ }
+}
+
+static void yaffs_empty_l_n_f(struct yaffs_dev *dev)
+{
+ yaffs_del_dir_contents(dev->lost_n_found);
+}
+
+
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory,
+ const YCHAR *name)
+{
+ int sum;
+ struct list_head *i;
+ YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1];
+ struct yaffs_obj *l;
+
+ if (!name)
+ return NULL;
+
+ if (!directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: null pointer directory"
+ );
+ BUG();
+ return NULL;
+ }
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "tragedy: yaffs_find_by_name: non-directory"
+ );
+ BUG();
+ }
+
+ sum = yaffs_calc_name_sum(name);
+
+ list_for_each(i, &directory->variant.dir_variant.children) {
+ l = list_entry(i, struct yaffs_obj, siblings);
+
+ if (l->parent != directory)
+ BUG();
+
+ yaffs_check_obj_details_loaded(l);
+
+ /* Special case for lost-n-found */
+ if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ if (!yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME))
+ return l;
+ } else if (l->sum == sum || l->hdr_chunk <= 0) {
+ /* LostnFound chunk called Objxxx
+ * Do a real check
+ */
+ yaffs_get_obj_name(l, buffer,
+ YAFFS_MAX_NAME_LENGTH + 1);
+ if (!yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH))
+ return l;
+ }
+ }
+ return NULL;
+}
+
+/* GetEquivalentObject dereferences any hard links to get to the
+ * actual object.
+ */
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj)
+{
+ if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) {
+ obj = obj->variant.hardlink_variant.equiv_obj;
+ yaffs_check_obj_details_loaded(obj);
+ }
+ return obj;
+}
+
+/*
+ * A note or two on object names.
+ * * If the object name is missing, we then make one up in the form objnnn
+ *
+ * * ASCII names are stored in the object header's name field from byte zero
+ * * Unicode names are historically stored starting from byte zero.
+ *
+ * Then there are automatic Unicode names...
+ * The purpose of these is to save names in a way that can be read as
+ * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII
+ * system to share files.
+ *
+ * These automatic unicode names are stored slightly differently...
+ * - If the name can fit in the ASCII character space then they are saved as
+ * ascii names as per above.
+ * - If the name needs Unicode then the name is saved in Unicode
+ * starting at oh->name[1].
+
+ */
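+/* Illustrative example: with auto-unicode enabled, a name such as "log.txt"
+ * fits the ASCII character set and is stored as ASCII from oh->name[0];
+ * a name containing non-ASCII characters is stored as Unicode starting at
+ * oh->name[1], with oh->name[0] set to 0 as a marker (see
+ * yaffs_load_oh_from_name() above).
+ */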
+static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
+ int buffer_size)
+{
+ /* Create an object name if we could not find one. */
+ if (yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) {
+ YCHAR local_name[20];
+ YCHAR num_string[20];
+ YCHAR *x = &num_string[19];
+ unsigned v = obj->obj_id;
+ num_string[19] = 0;
+ while (v > 0) {
+ x--;
+ *x = '0' + (v % 10);
+ v /= 10;
+ }
+ /* make up a name */
+ yaffs_strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX);
+ yaffs_strcat(local_name, x);
+ yaffs_strncpy(name, local_name, buffer_size - 1);
+ }
+}
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size)
+{
+ memset(name, 0, buffer_size * sizeof(YCHAR));
+ yaffs_check_obj_details_loaded(obj);
+ if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) {
+ yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1);
+ } else if (obj->short_name[0]) {
+ yaffs_strcpy(name, obj->short_name);
+ } else if (obj->hdr_chunk > 0) {
+ u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
+
+ struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
+
+ memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
+
+ if (obj->hdr_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(obj->my_dev,
+ obj->hdr_chunk,
+ buffer, NULL);
+ }
+ yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
+ buffer_size);
+
+ yaffs_release_temp_buffer(obj->my_dev, buffer);
+ }
+
+ yaffs_fix_null_name(obj, name, buffer_size);
+
+ return yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH);
+}
+
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj)
+{
+ /* Dereference any hard linking */
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ return obj->variant.file_variant.file_size;
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ if (!obj->variant.symlink_variant.alias)
+ return 0;
+ return yaffs_strnlen(obj->variant.symlink_variant.alias,
+ YAFFS_MAX_ALIAS_LENGTH);
+ } else {
+ /* Only a directory should drop through to here */
+ return obj->my_dev->data_bytes_per_chunk;
+ }
+}
+
+int yaffs_get_obj_link_count(struct yaffs_obj *obj)
+{
+ int count = 0;
+ struct list_head *i;
+
+ if (!obj->unlinked)
+ count++; /* the object itself */
+
+ list_for_each(i, &obj->hard_links)
+ count++; /* add the hard links; */
+
+ return count;
+}
+
+int yaffs_get_obj_inode(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj->obj_id;
+}
+
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ return DT_DIR;
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ return DT_LNK;
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ return DT_REG;
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ if (S_ISFIFO(obj->yst_mode))
+ return DT_FIFO;
+ if (S_ISCHR(obj->yst_mode))
+ return DT_CHR;
+ if (S_ISBLK(obj->yst_mode))
+ return DT_BLK;
+ if (S_ISSOCK(obj->yst_mode))
+ return DT_SOCK;
+ return DT_REG;
+ break;
+ default:
+ return DT_REG;
+ break;
+ }
+}
+
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj)
+{
+ obj = yaffs_get_equivalent_obj(obj);
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ return yaffs_clone_str(obj->variant.symlink_variant.alias);
+ else
+ return yaffs_clone_str(_Y(""));
+}
+
+/*--------------------------- Initialisation code -------------------------- */
+
+static int yaffs_check_dev_fns(const struct yaffs_dev *dev)
+{
+ /* Common functions, gotta have */
+ if (!dev->param.erase_fn || !dev->param.initialise_flash_fn)
+ return 0;
+
+ /* Can use the "with tags" style interface for yaffs1 or yaffs2 */
+ if (dev->param.write_chunk_tags_fn &&
+ dev->param.read_chunk_tags_fn &&
+ !dev->param.write_chunk_fn &&
+ !dev->param.read_chunk_fn &&
+ dev->param.bad_block_fn && dev->param.query_block_fn)
+ return 1;
+
+ /* Can use the "spare" style interface for yaffs1 */
+ if (!dev->param.is_yaffs2 &&
+ !dev->param.write_chunk_tags_fn &&
+ !dev->param.read_chunk_tags_fn &&
+ dev->param.write_chunk_fn &&
+ dev->param.read_chunk_fn &&
+ !dev->param.bad_block_fn && !dev->param.query_block_fn)
+ return 1;
+
+ return 0; /* bad */
+}
+
+static int yaffs_create_initial_dir(struct yaffs_dev *dev)
+{
+ /* Initialise the unlinked, deleted, root and lost+found directories */
+ dev->lost_n_found = dev->root_dir = NULL;
+ dev->unlinked_dir = dev->del_dir = NULL;
+ dev->unlinked_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR);
+ dev->del_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR);
+ dev->root_dir =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT,
+ YAFFS_ROOT_MODE | S_IFDIR);
+ dev->lost_n_found =
+ yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND,
+ YAFFS_LOSTNFOUND_MODE | S_IFDIR);
+
+ if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir
+ && dev->del_dir) {
+ yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+int yaffs_guts_initialise(struct yaffs_dev *dev)
+{
+ int init_failed = 0;
+ unsigned x;
+ int bits;
+
+ yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()");
+
+ /* Check stuff that must be set */
+
+ if (!dev) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "yaffs: Need a device"
+ );
+ return YAFFS_FAIL;
+ }
+
+ if (dev->is_mounted) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted");
+ return YAFFS_FAIL;
+ }
+
+ dev->internal_start_block = dev->param.start_block;
+ dev->internal_end_block = dev->param.end_block;
+ dev->block_offset = 0;
+ dev->chunk_offset = 0;
+ dev->n_free_chunks = 0;
+
+ dev->gc_block = 0;
+
+ if (dev->param.start_block == 0) {
+ dev->internal_start_block = dev->param.start_block + 1;
+ dev->internal_end_block = dev->param.end_block + 1;
+ dev->block_offset = 1;
+ dev->chunk_offset = dev->param.chunks_per_block;
+ }
+
+ /* Check geometry parameters. */
+
+ if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 1024) ||
+ (!dev->param.is_yaffs2 &&
+ dev->param.total_bytes_per_chunk < 512) ||
+ (dev->param.inband_tags && !dev->param.is_yaffs2) ||
+ dev->param.chunks_per_block < 2 ||
+ dev->param.n_reserved_blocks < 2 ||
+ dev->internal_start_block <= 0 ||
+ dev->internal_end_block <= 0 ||
+ dev->internal_end_block <=
+ (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
+ ) {
+ /* otherwise it is too small */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
+ dev->param.total_bytes_per_chunk,
+ dev->param.is_yaffs2 ? "2" : "",
+ dev->param.inband_tags);
+ return YAFFS_FAIL;
+ }
+
+ if (yaffs_init_nand(dev) != YAFFS_OK) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
+ return YAFFS_FAIL;
+ }
+
+ /* Sort out space for inband tags, if required */
+ if (dev->param.inband_tags)
+ dev->data_bytes_per_chunk =
+ dev->param.total_bytes_per_chunk -
+ sizeof(struct yaffs_packed_tags2_tags_only);
+ else
+ dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
+
+ /* Got the right mix of functions? */
+ if (!yaffs_check_dev_fns(dev)) {
+ /* Function missing */
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "device function(s) missing or wrong");
+
+ return YAFFS_FAIL;
+ }
+
+ /* Finished with most checks. Further checks happen later on too. */
+
+ dev->is_mounted = 1;
+
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ x = dev->data_bytes_per_chunk;
+ /* We always use dev->chunk_shift and dev->chunk_div */
+ dev->chunk_shift = calc_shifts(x);
+ x >>= dev->chunk_shift;
+ dev->chunk_div = x;
+ /* We only use chunk mask if chunk_div is 1 */
+ dev->chunk_mask = (1 << dev->chunk_shift) - 1;
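+	/* Worked example (illustrative): with 2048-byte data chunks,
+	 * chunk_shift = 11, chunk_div = 1 and chunk_mask = 0x7ff, so a file
+	 * offset splits into chunk and offset-in-chunk by shift and mask alone.
+	 */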
+
+ /*
+ * Calculate chunk_grp_bits.
+	 * We need to find the next power of 2 that is greater than or equal
+	 * to chunks_per_block * (internal_end_block + 1).
+ */
+
+ x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
+
+ bits = calc_shifts_ceiling(x);
+
+ /* Set up tnode width if wide tnodes are enabled. */
+ if (!dev->param.wide_tnodes_disabled) {
+ /* bits must be even so that we end up with 32-bit words */
+ if (bits & 1)
+ bits++;
+ if (bits < 16)
+ dev->tnode_width = 16;
+ else
+ dev->tnode_width = bits;
+ } else {
+ dev->tnode_width = 16;
+ }
+
+ dev->tnode_mask = (1 << dev->tnode_width) - 1;
+
+ /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
+ * so if the bitwidth of the
+ * chunk range we're using is greater than 16 we need
+ * to figure out chunk shift and chunk_grp_size
+ */
+
+ if (bits <= dev->tnode_width)
+ dev->chunk_grp_bits = 0;
+ else
+ dev->chunk_grp_bits = bits - dev->tnode_width;
+
+ dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
+ if (dev->tnode_size < sizeof(struct yaffs_tnode))
+ dev->tnode_size = sizeof(struct yaffs_tnode);
+
+ dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
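+	/* Worked example (illustrative): 64 chunks per block and
+	 * internal_end_block = 1023 give x = 65536, so bits = 16. With wide
+	 * tnodes enabled the width stays at the 16-bit minimum,
+	 * chunk_grp_bits = 0, chunk_grp_size = 1, and a level-0 tnode needs
+	 * (16 * 16) / 8 = 32 bytes of chunk-id bits (rounded up to
+	 * sizeof(struct yaffs_tnode) if that is larger).
+	 */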
+
+ if (dev->param.chunks_per_block < dev->chunk_grp_size) {
+ /* We have a problem because the soft delete won't work if
+ * the chunk group size > chunks per block.
+ * This can be remedied by using larger "virtual blocks".
+ */
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large");
+
+ return YAFFS_FAIL;
+ }
+
+ /* Finished verifying the device, continue with initialisation */
+
+ /* More device initialisation */
+ dev->all_gcs = 0;
+ dev->passive_gc_count = 0;
+ dev->oldest_dirty_gc_count = 0;
+ dev->bg_gcs = 0;
+ dev->gc_block_finder = 0;
+ dev->buffered_block = -1;
+ dev->doing_buffered_block_rewrite = 0;
+ dev->n_deleted_files = 0;
+ dev->n_bg_deletions = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_ecc_fixed = 0;
+ dev->n_ecc_unfixed = 0;
+ dev->n_tags_ecc_fixed = 0;
+ dev->n_tags_ecc_unfixed = 0;
+ dev->n_erase_failures = 0;
+ dev->n_erased_blocks = 0;
+ dev->gc_disable = 0;
+ dev->has_pending_prioritised_gc = 1;
+ /* Assume the worst for now, will get fixed on first GC */
+ INIT_LIST_HEAD(&dev->dirty_dirs);
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+
+ /* Initialise temporary buffers and caches. */
+ if (!yaffs_init_tmp_buffers(dev))
+ init_failed = 1;
+
+ dev->cache = NULL;
+ dev->gc_cleanup_list = NULL;
+
+ if (!init_failed && dev->param.n_caches > 0) {
+ int i;
+ void *buf;
+ int cache_bytes =
+ dev->param.n_caches * sizeof(struct yaffs_cache);
+
+ if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES)
+ dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES;
+
+ dev->cache = kmalloc(cache_bytes, GFP_NOFS);
+
+ buf = (u8 *) dev->cache;
+
+ if (dev->cache)
+ memset(dev->cache, 0, cache_bytes);
+
+ for (i = 0; i < dev->param.n_caches && buf; i++) {
+ dev->cache[i].object = NULL;
+ dev->cache[i].last_use = 0;
+ dev->cache[i].dirty = 0;
+ dev->cache[i].data = buf =
+ kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS);
+ }
+ if (!buf)
+ init_failed = 1;
+
+ dev->cache_last_use = 0;
+ }
+
+ dev->cache_hits = 0;
+
+ if (!init_failed) {
+ dev->gc_cleanup_list =
+ kmalloc(dev->param.chunks_per_block * sizeof(u32),
+ GFP_NOFS);
+ if (!dev->gc_cleanup_list)
+ init_failed = 1;
+ }
+
+ if (dev->param.is_yaffs2)
+ dev->param.use_header_file_size = 1;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && dev->param.is_yaffs2 &&
+ !dev->param.disable_summary &&
+ !yaffs_summary_init(dev))
+ init_failed = 1;
+
+ if (!init_failed) {
+ /* Now scan the flash. */
+ if (dev->param.is_yaffs2) {
+ if (yaffs2_checkpt_restore(dev)) {
+ yaffs_check_obj_details_loaded(dev->root_dir);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT |
+ YAFFS_TRACE_MOUNT,
+ "yaffs: restored from checkpoint"
+ );
+ } else {
+
+ /* Clean up the mess caused by an aborted
+ * checkpoint load then scan backwards.
+ */
+ yaffs_deinit_blocks(dev);
+
+ yaffs_deinit_tnodes_and_objs(dev);
+
+ dev->n_erased_blocks = 0;
+ dev->n_free_chunks = 0;
+ dev->alloc_block = -1;
+ dev->alloc_page = -1;
+ dev->n_deleted_files = 0;
+ dev->n_unlinked_files = 0;
+ dev->n_bg_deletions = 0;
+
+ if (!init_failed && !yaffs_init_blocks(dev))
+ init_failed = 1;
+
+ yaffs_init_tnodes_and_objs(dev);
+
+ if (!init_failed
+ && !yaffs_create_initial_dir(dev))
+ init_failed = 1;
+
+ if (!init_failed && !yaffs2_scan_backwards(dev))
+ init_failed = 1;
+ }
+ } else if (!yaffs1_scan(dev)) {
+ init_failed = 1;
+ }
+
+ yaffs_strip_deleted_objs(dev);
+ yaffs_fix_hanging_objs(dev);
+ if (dev->param.empty_lost_n_found)
+ yaffs_empty_l_n_f(dev);
+ }
+
+ if (init_failed) {
+ /* Clean up the mess */
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() aborted.");
+
+ yaffs_deinitialise(dev);
+ return YAFFS_FAIL;
+ }
+
+ /* Zero out stats */
+ dev->n_page_reads = 0;
+ dev->n_page_writes = 0;
+ dev->n_erasures = 0;
+ dev->n_gc_copies = 0;
+ dev->n_retried_writes = 0;
+
+ dev->n_retired_blocks = 0;
+
+ yaffs_verify_free_chunks(dev);
+ yaffs_verify_blocks(dev);
+
+ /* Clean up any aborted checkpoint data */
+ if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0)
+ yaffs2_checkpt_invalidate(dev);
+
+ yaffs_trace(YAFFS_TRACE_TRACING,
+ "yaffs: yaffs_guts_initialise() done.");
+ return YAFFS_OK;
+}
+
+void yaffs_deinitialise(struct yaffs_dev *dev)
+{
+ if (dev->is_mounted) {
+ int i;
+
+ yaffs_deinit_blocks(dev);
+ yaffs_deinit_tnodes_and_objs(dev);
+ yaffs_summary_deinit(dev);
+
+ if (dev->param.n_caches > 0 && dev->cache) {
+
+ for (i = 0; i < dev->param.n_caches; i++) {
+ kfree(dev->cache[i].data);
+ dev->cache[i].data = NULL;
+ }
+
+ kfree(dev->cache);
+ dev->cache = NULL;
+ }
+
+ kfree(dev->gc_cleanup_list);
+
+ for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++)
+ kfree(dev->temp_buffer[i].buffer);
+
+ dev->is_mounted = 0;
+
+ if (dev->param.deinitialise_flash_fn)
+ dev->param.deinitialise_flash_fn(dev);
+ }
+}
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev)
+{
+ int n_free = 0;
+ int b;
+ struct yaffs_block_info *blk;
+
+ blk = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+ switch (blk->block_state) {
+ case YAFFS_BLOCK_STATE_EMPTY:
+ case YAFFS_BLOCK_STATE_ALLOCATING:
+ case YAFFS_BLOCK_STATE_COLLECTING:
+ case YAFFS_BLOCK_STATE_FULL:
+ n_free +=
+ (dev->param.chunks_per_block - blk->pages_in_use +
+ blk->soft_del_pages);
+ break;
+ default:
+ break;
+ }
+ blk++;
+ }
+ return n_free;
+}
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
+{
+ /* This is what we report to the outside world */
+ int n_free;
+ int n_dirty_caches;
+ int blocks_for_checkpt;
+ int i;
+
+ n_free = dev->n_free_chunks;
+ n_free += dev->n_deleted_files;
+
+ /* Now count and subtract the number of dirty chunks in the cache. */
+
+ for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
+ if (dev->cache[i].dirty)
+ n_dirty_caches++;
+ }
+
+ n_free -= n_dirty_caches;
+
+ n_free -=
+ ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
+
+ /* Now figure checkpoint space and report that... */
+ blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
+
+ n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
+
+ if (n_free < 0)
+ n_free = 0;
+
+ return n_free;
+}
+
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
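+/* Worked example (illustrative): a 6 GiB file (fsize = 0x180000000) is stored
+ * as file_size_low = 0x80000000 and file_size_high = 0x1. A header whose
+ * file_size_high is still the erased value 0xffffffff (e.g. one written
+ * before the high word existed) makes ~file_size_high == 0, so
+ * yaffs_oh_to_size() falls back to the 32-bit file_size_low value alone.
+ */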
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
+{
+ oh->file_size_low = (fsize & 0xFFFFFFFF);
+ oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
+}
+
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
+{
+ loff_t retval;
+
+ if (~(oh->file_size_high))
+ retval = (((loff_t) oh->file_size_high) << 32) |
+ (((loff_t) oh->file_size_low) & 0xFFFFFFFF);
+ else
+ retval = (loff_t) oh->file_size_low;
+
+ return retval;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_guts.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_guts.h
new file mode 100644
index 000000000..e3558c5a6
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_guts.h
@@ -0,0 +1,973 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_GUTS_H__
+#define __YAFFS_GUTS_H__
+
+#include "yportenv.h"
+
+#define YAFFS_OK 1
+#define YAFFS_FAIL 0
+
+/* Give us a Y=0x59,
+ * Give us an A=0x41,
+ * Give us an FF=0xff
+ * Give us an S=0x53
+ * And what have we got...
+ */
+#define YAFFS_MAGIC 0x5941ff53
+
+/*
+ * Tnodes form a tree with the tnodes in "levels"
+ * Levels greater than 0 hold 8 slots which point to other tnodes.
+ * Those at level 0 hold 16 slots which point to chunks in NAND.
+ *
+ * A maximum level of 8 thus supports files of size up to:
+ *
+ * 2^(3*MAX_LEVEL+4)
+ *
+ * Thus a max level of 8 supports files with up to 2^28 chunks, which gives
+ * a maximum file size of around 512 Gbytes with 2k chunks.
+ */
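+/* Worked arithmetic (illustrative): a level-0 tnode addresses 2^4 = 16 chunks
+ * and each level above multiplies that by 2^3 = 8, so 8 levels give
+ * 2^(3*8+4) = 2^28 chunks; at 2048 bytes per chunk that is 2^39 bytes,
+ * i.e. about 512 Gbytes.
+ */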
+#define YAFFS_NTNODES_LEVEL0 16
+#define YAFFS_TNODES_LEVEL0_BITS 4
+#define YAFFS_TNODES_LEVEL0_MASK 0xf
+
+#define YAFFS_NTNODES_INTERNAL (YAFFS_NTNODES_LEVEL0 / 2)
+#define YAFFS_TNODES_INTERNAL_BITS (YAFFS_TNODES_LEVEL0_BITS - 1)
+#define YAFFS_TNODES_INTERNAL_MASK 0x7
+#define YAFFS_TNODES_MAX_LEVEL 8
+#define YAFFS_TNODES_MAX_BITS (YAFFS_TNODES_LEVEL0_BITS + \
+ YAFFS_TNODES_INTERNAL_BITS * \
+ YAFFS_TNODES_MAX_LEVEL)
+#define YAFFS_MAX_CHUNK_ID ((1 << YAFFS_TNODES_MAX_BITS) - 1)
+
+/* Constants for YAFFS1 mode */
+#define YAFFS_BYTES_PER_SPARE 16
+#define YAFFS_BYTES_PER_CHUNK 512
+#define YAFFS_CHUNK_SIZE_SHIFT 9
+#define YAFFS_CHUNKS_PER_BLOCK 32
+#define YAFFS_BYTES_PER_BLOCK (YAFFS_CHUNKS_PER_BLOCK*YAFFS_BYTES_PER_CHUNK)
+
+#define YAFFS_MIN_YAFFS2_CHUNK_SIZE 1024
+#define YAFFS_MIN_YAFFS2_SPARE_SIZE 32
+
+
+
+#define YAFFS_ALLOCATION_NOBJECTS 100
+#define YAFFS_ALLOCATION_NTNODES 100
+#define YAFFS_ALLOCATION_NLINKS 100
+
+#define YAFFS_NOBJECT_BUCKETS 256
+
+#define YAFFS_OBJECT_SPACE 0x40000
+#define YAFFS_MAX_OBJECT_ID (YAFFS_OBJECT_SPACE - 1)
+
+/* Binary data version stamps */
+#define YAFFS_SUMMARY_VERSION 1
+#define YAFFS_CHECKPOINT_VERSION 6
+
+#ifdef CONFIG_YAFFS_UNICODE
+#define YAFFS_MAX_NAME_LENGTH 127
+#define YAFFS_MAX_ALIAS_LENGTH 79
+#else
+#define YAFFS_MAX_NAME_LENGTH 255
+#define YAFFS_MAX_ALIAS_LENGTH 159
+#endif
+
+#define YAFFS_SHORT_NAME_LENGTH 15
+
+/* Some special object ids for pseudo objects */
+#define YAFFS_OBJECTID_ROOT 1
+#define YAFFS_OBJECTID_LOSTNFOUND 2
+#define YAFFS_OBJECTID_UNLINKED 3
+#define YAFFS_OBJECTID_DELETED 4
+
+/* Fake object Id for summary data */
+#define YAFFS_OBJECTID_SUMMARY 0x10
+
+/* Pseudo object ids for checkpointing */
+#define YAFFS_OBJECTID_CHECKPOINT_DATA 0x20
+#define YAFFS_SEQUENCE_CHECKPOINT_DATA 0x21
+
+#define YAFFS_MAX_SHORT_OP_CACHES 20
+
+#define YAFFS_N_TEMP_BUFFERS 6
+
+/* We limit the number of attempts at successfully saving a chunk of data.
+ * Small-page devices have 32 pages per block; large-page devices have 64.
+ * Default to something in the order of 5 to 10 blocks worth of chunks.
+ */
+#define YAFFS_WR_ATTEMPTS (5*64)
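+/* e.g. 5 * 64 = 320 attempts: roughly 5 blocks worth of chunks on a 64-page
+ * device, or 10 blocks worth on a 32-page device.
+ */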
+
+/* Sequence numbers are used in YAFFS2 to determine block allocation order.
+ * The range is limited slightly to help distinguish bad numbers from good.
+ * This also allows us to perhaps in the future use special numbers for
+ * special purposes.
+ * EFFFFF00 allows the allocation of 8 blocks/second (~1Mbytes) for 15 years,
+ * and is a larger number than the lifetime of a 2GB device.
+ */
+#define YAFFS_LOWEST_SEQUENCE_NUMBER 0x00001000
+#define YAFFS_HIGHEST_SEQUENCE_NUMBER 0xefffff00
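+/* Rough check (illustrative): 8 blocks/second for 15 years is about
+ * 8 * 60 * 60 * 24 * 365 * 15 ~= 3.8e9 allocations, comfortably below
+ * 0xefffff00 ~= 4.0e9. The ~1 Mbyte/second figure assumes blocks of
+ * about 128 KiB.
+ */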
+
+/* Special sequence number for bad block that failed to be marked bad */
+#define YAFFS_SEQUENCE_BAD_BLOCK 0xffff0000
+
+/* ChunkCache is used for short read/write operations.*/
+struct yaffs_cache {
+ struct yaffs_obj *object;
+ int chunk_id;
+ int last_use;
+ int dirty;
+ int n_bytes; /* Only valid if the cache is dirty */
+ int locked; /* Can't push out or flush while locked. */
+ u8 *data;
+};
+
+/* yaffs1 tags structures in RAM
+ * NB This uses bitfield. Bitfields should not straddle a u32 boundary
+ * otherwise the structure size will get blown out.
+ */
+
+struct yaffs_tags {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes_lsb:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned n_bytes_msb:2;
+};
+
+union yaffs_tags_union {
+ struct yaffs_tags as_tags;
+ u8 as_bytes[8];
+};
+
+
+/* Stuff used for extended tags in YAFFS2 */
+
+enum yaffs_ecc_result {
+ YAFFS_ECC_RESULT_UNKNOWN,
+ YAFFS_ECC_RESULT_NO_ERROR,
+ YAFFS_ECC_RESULT_FIXED,
+ YAFFS_ECC_RESULT_UNFIXED
+};
+
+enum yaffs_obj_type {
+ YAFFS_OBJECT_TYPE_UNKNOWN,
+ YAFFS_OBJECT_TYPE_FILE,
+ YAFFS_OBJECT_TYPE_SYMLINK,
+ YAFFS_OBJECT_TYPE_DIRECTORY,
+ YAFFS_OBJECT_TYPE_HARDLINK,
+ YAFFS_OBJECT_TYPE_SPECIAL
+};
+
+#define YAFFS_OBJECT_TYPE_MAX YAFFS_OBJECT_TYPE_SPECIAL
+
+struct yaffs_ext_tags {
+ unsigned chunk_used; /* Status of the chunk: used or unused */
+ unsigned obj_id; /* If 0 this is not used */
+ unsigned chunk_id; /* If 0 this is a header, else a data chunk */
+ unsigned n_bytes; /* Only valid for data chunks */
+
+ /* The following stuff only has meaning when we read */
+ enum yaffs_ecc_result ecc_result;
+ unsigned block_bad;
+
+ /* YAFFS 1 stuff */
+ unsigned is_deleted; /* The chunk is marked deleted */
+ unsigned serial_number; /* Yaffs1 2-bit serial number */
+
+ /* YAFFS2 stuff */
+ unsigned seq_number; /* The sequence number of this block */
+
+ /* Extra info if this is an object header (YAFFS2 only) */
+
+ unsigned extra_available; /* Extra info available if not zero */
+ unsigned extra_parent_id; /* The parent object */
+ unsigned extra_is_shrink; /* Is it a shrink header? */
+ unsigned extra_shadows; /* Does this shadow another object? */
+
+ enum yaffs_obj_type extra_obj_type; /* What object type? */
+
+ loff_t extra_file_size; /* Length if it is a file */
+ unsigned extra_equiv_id; /* Equivalent object for a hard link */
+};
+
+/* Spare structure for YAFFS1 */
+struct yaffs_spare {
+ u8 tb0;
+ u8 tb1;
+ u8 tb2;
+ u8 tb3;
+ u8 page_status; /* set to 0 to delete the chunk */
+ u8 block_status;
+ u8 tb4;
+ u8 tb5;
+ u8 ecc1[3];
+ u8 tb6;
+ u8 tb7;
+ u8 ecc2[3];
+};
+
+/*Special structure for passing through to mtd */
+struct yaffs_nand_spare {
+ struct yaffs_spare spare;
+ int eccres1;
+ int eccres2;
+};
+
+/* Block data in RAM */
+
+enum yaffs_block_state {
+ YAFFS_BLOCK_STATE_UNKNOWN = 0,
+
+ YAFFS_BLOCK_STATE_SCANNING,
+ /* Being scanned */
+
+ YAFFS_BLOCK_STATE_NEEDS_SCAN,
+ /* The block might have something on it (ie it is allocating or full,
+ * perhaps empty) but it needs to be scanned to determine its true
+ * state.
+ * This state is only valid during scanning.
+ * NB We tolerate empty because the pre-scanner might be incapable of
+ * deciding
+ * However, if this state is returned on a YAFFS2 device,
+ * then we expect a sequence number
+ */
+
+ YAFFS_BLOCK_STATE_EMPTY,
+ /* This block is empty */
+
+ YAFFS_BLOCK_STATE_ALLOCATING,
+ /* This block is partially allocated.
+ * At least one page holds valid data.
+ * This is the one currently being used for page
+ * allocation. Should never be more than one of these.
+ * If a block is only partially allocated at mount it is treated as
+ * full.
+ */
+
+ YAFFS_BLOCK_STATE_FULL,
+ /* All the pages in this block have been allocated.
+ * If a block was only partially allocated when mounted we treat
+ * it as fully allocated.
+ */
+
+ YAFFS_BLOCK_STATE_DIRTY,
+ /* The block was full and now all chunks have been deleted.
+ * Erase me, reuse me.
+ */
+
+ YAFFS_BLOCK_STATE_CHECKPOINT,
+ /* This block is assigned to holding checkpoint data. */
+
+ YAFFS_BLOCK_STATE_COLLECTING,
+ /* This block is being garbage collected */
+
+ YAFFS_BLOCK_STATE_DEAD
+ /* This block has failed and is not in use */
+};
+
+#define YAFFS_NUMBER_OF_BLOCK_STATES (YAFFS_BLOCK_STATE_DEAD + 1)
+
+struct yaffs_block_info {
+
+ int soft_del_pages:10; /* number of soft deleted pages */
+ int pages_in_use:10; /* number of pages in use */
+ unsigned block_state:4; /* One of the above block states. */
+ /* NB use unsigned because enum is sometimes
+ * an int */
+ u32 needs_retiring:1; /* Data has failed on this block, */
+ /*need to get valid data off and retire*/
+ u32 skip_erased_check:1;/* Skip the erased check on this block */
+ u32 gc_prioritise:1; /* An ECC check or blank check has failed.
+ Block should be prioritised for GC */
+ u32 chunk_error_strikes:3; /* How many times we've had ecc etc
+ failures on this block and tried to reuse it */
+ u32 has_summary:1; /* The block has a summary */
+
+ u32 has_shrink_hdr:1; /* This block has at least one shrink header */
+ u32 seq_number; /* block sequence number for yaffs2 */
+
+};
+
+/* -------------------------- Object structure -------------------------------*/
+/* This is the object structure as stored on NAND */
+
+struct yaffs_obj_hdr {
+ enum yaffs_obj_type type;
+
+ /* Apply to everything */
+ int parent_obj_id;
+ u16 sum_no_longer_used; /* checksum of name. No longer used */
+ YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
+
+ /* The following apply to all object types except for hard links */
+ u32 yst_mode; /* protection */
+
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+
+ /* File size applies to files only */
+ u32 file_size_low;
+
+ /* Equivalent object id applies to hard links only. */
+ int equiv_id;
+
+ /* Alias is for symlinks only. */
+ YCHAR alias[YAFFS_MAX_ALIAS_LENGTH + 1];
+
+ u32 yst_rdev; /* stuff for block and char devices (major/min) */
+
+ u32 win_ctime[2];
+ u32 win_atime[2];
+ u32 win_mtime[2];
+
+ u32 inband_shadowed_obj_id;
+ u32 inband_is_shrink;
+
+ u32 file_size_high;
+ u32 reserved[1];
+ int shadows_obj; /* This object header shadows the
+ specified object if > 0 */
+
+ /* is_shrink applies to object headers written when we make a hole. */
+ u32 is_shrink;
+
+};
+
+/*--------------------------- Tnode -------------------------- */
+
+struct yaffs_tnode {
+ struct yaffs_tnode *internal[YAFFS_NTNODES_INTERNAL];
+};
+
+/*------------------------ Object -----------------------------*/
+/* An object can be one of:
+ * - a directory (no data, has links to its children)
+ * - a regular file (data.... not prunes :->).
+ * - a symlink [symbolic link] (the alias).
+ * - a hard link
+ */
+
+struct yaffs_file_var {
+ loff_t file_size;
+ loff_t scanned_size;
+ loff_t shrink_size;
+ int top_level;
+ struct yaffs_tnode *top;
+};
+
+struct yaffs_dir_var {
+ struct list_head children; /* list of child links */
+ struct list_head dirty; /* Entry for list of dirty directories */
+};
+
+struct yaffs_symlink_var {
+ YCHAR *alias;
+};
+
+struct yaffs_hardlink_var {
+ struct yaffs_obj *equiv_obj;
+ u32 equiv_id;
+};
+
+union yaffs_obj_var {
+ struct yaffs_file_var file_variant;
+ struct yaffs_dir_var dir_variant;
+ struct yaffs_symlink_var symlink_variant;
+ struct yaffs_hardlink_var hardlink_variant;
+};
+
+struct yaffs_obj {
+ u8 deleted:1; /* This should only apply to unlinked files. */
+ u8 soft_del:1; /* it has also been soft deleted */
+ u8 unlinked:1; /* An unlinked file.*/
+ u8 fake:1; /* A fake object has no presence on NAND. */
+ u8 rename_allowed:1; /* Some objects cannot be renamed. */
+ u8 unlink_allowed:1;
+ u8 dirty:1; /* the object needs to be written to flash */
+ u8 valid:1; /* When the file system is being loaded up, this
+ * object might be created before the data
+ * is available
+ * ie. file data chunks encountered before
+ * the header.
+ */
+ u8 lazy_loaded:1; /* This object has been lazy loaded and
+ * is missing some detail */
+
+ u8 defered_free:1; /* Object is removed from NAND, but is
+ * still in the inode cache.
+ * Freeing of the object is deferred
+ * until the inode is released.
+ */
+ u8 being_created:1; /* This object is still being created
+ * so skip some verification checks. */
+ u8 is_shadowed:1; /* This object is shadowed on the way
+ * to being renamed. */
+
+ u8 xattr_known:1; /* We know whether this object has xattribs
+ * or not. */
+ u8 has_xattr:1; /* This object has xattribs.
+ * Only valid if xattr_known. */
+
+ u8 serial; /* serial number of chunk in NAND.*/
+ u16 sum; /* sum of the name to speed searching */
+
+ struct yaffs_dev *my_dev; /* The device I'm on */
+
+ struct list_head hash_link; /* list of objects in hash bucket */
+
+ struct list_head hard_links; /* hard linked object chain*/
+
+ /* directory structure stuff */
+ /* also used for linking up the free list */
+ struct yaffs_obj *parent;
+ struct list_head siblings;
+
+ /* Where's my object header in NAND? */
+ int hdr_chunk;
+
+ int n_data_chunks; /* Number of data chunks for this file. */
+
+ u32 obj_id; /* the object id value */
+
+ u32 yst_mode;
+
+ YCHAR short_name[YAFFS_SHORT_NAME_LENGTH + 1];
+
+#ifdef CONFIG_YAFFS_WINCE
+ u32 win_ctime[2];
+ u32 win_mtime[2];
+ u32 win_atime[2];
+#else
+ u32 yst_uid;
+ u32 yst_gid;
+ u32 yst_atime;
+ u32 yst_mtime;
+ u32 yst_ctime;
+#endif
+
+ u32 yst_rdev;
+
+ void *my_inode;
+
+ enum yaffs_obj_type variant_type;
+
+ union yaffs_obj_var variant;
+
+};
+
+struct yaffs_obj_bucket {
+ struct list_head list;
+ int count;
+};
+
+/* yaffs_checkpt_obj holds the definition of an object as dumped
+ * by checkpointing.
+ */
+
+struct yaffs_checkpt_obj {
+ int struct_type;
+ u32 obj_id;
+ u32 parent_id;
+ int hdr_chunk;
+ enum yaffs_obj_type variant_type:3;
+ u8 deleted:1;
+ u8 soft_del:1;
+ u8 unlinked:1;
+ u8 fake:1;
+ u8 rename_allowed:1;
+ u8 unlink_allowed:1;
+ u8 serial;
+ int n_data_chunks;
+ loff_t size_or_equiv_obj;
+};
+
+/*--------------------- Temporary buffers ----------------
+ *
+ * These are chunk-sized working buffers. Each device has a few.
+ */
+
+struct yaffs_buffer {
+ u8 *buffer;
+ int in_use;
+};
+
+/*----------------- Device ---------------------------------*/
+
+struct yaffs_param {
+ const YCHAR *name;
+
+ /*
+ * Entry parameters set up way early. Yaffs sets up the rest.
+ * The structure should be zeroed out before use so that unused
+ * and default values are zero.
+ */
+
+ int inband_tags; /* Use inband tags */
+ u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
+ be a power of 2 */
+ int chunks_per_block; /* does not need to be a power of 2 */
+ int spare_bytes_per_chunk; /* spare area size */
+ int start_block; /* Start block we're allowed to use */
+ int end_block; /* End block we're allowed to use */
+ int n_reserved_blocks; /* Tuneable so that we can reduce
+ * reserved blocks on NOR and RAM. */
+
+ int n_caches; /* If <= 0, then short op caching is disabled,
+ * else the number of short op caches.
+ */
+ int use_nand_ecc; /* Flag to decide whether or not to use
+ * NAND driver ECC on data (yaffs1) */
+ int tags_9bytes; /* Use 9 byte tags */
+ int no_tags_ecc; /* Flag to decide whether or not to do ECC
+ * on packed tags (yaffs2) */
+
+ int is_yaffs2; /* Use yaffs2 mode on this device */
+
+ int empty_lost_n_found; /* Auto-empty lost+found directory on mount */
+
+ int refresh_period; /* How often to check for a block refresh */
+
+ /* Checkpoint control. Can be set before or after initialisation */
+ u8 skip_checkpt_rd;
+ u8 skip_checkpt_wr;
+
+ int enable_xattr; /* Enable xattribs */
+
+ /* NAND access functions (Must be set before calling YAFFS) */
+
+ int (*write_chunk_fn) (struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_spare *spare);
+ int (*read_chunk_fn) (struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_spare *spare);
+ int (*erase_fn) (struct yaffs_dev *dev, int flash_block);
+ int (*initialise_flash_fn) (struct yaffs_dev *dev);
+ int (*deinitialise_flash_fn) (struct yaffs_dev *dev);
+
+ /* yaffs2 mode functions */
+ int (*write_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags);
+ int (*read_chunk_tags_fn) (struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags);
+ int (*bad_block_fn) (struct yaffs_dev *dev, int block_no);
+ int (*query_block_fn) (struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+
+ /* The remove_obj_fn function must be supplied by OS flavours that
+ * need it.
+ * yaffs direct uses it to implement the faster readdir.
+ * Linux uses it to protect the directory during unlocking.
+ */
+ void (*remove_obj_fn) (struct yaffs_obj *obj);
+
+ /* Callback to mark the superblock dirty */
+ void (*sb_dirty_fn) (struct yaffs_dev *dev);
+
+ /* Callback to control garbage collection. */
+ unsigned (*gc_control) (struct yaffs_dev *dev);
+
+ /* Debug control flags. Don't use unless you know what you're doing */
+ int use_header_file_size; /* Flag to determine if we should use
+ * file sizes from the header */
+ int disable_lazy_load; /* Disable lazy loading on this device */
+ int wide_tnodes_disabled; /* Set to disable wide tnodes */
+ int disable_soft_del; /* yaffs 1 only: Set to disable the use of
+ * softdeletion. */
+
+ int defered_dir_update; /* Set to defer directory updates */
+
+#ifdef CONFIG_YAFFS_AUTO_UNICODE
+ int auto_unicode;
+#endif
+ int always_check_erased; /* Force chunk erased check always on */
+
+ int disable_summary;
+
+ int max_objects; /*
+ * Set to limit the number of objects created.
+ * 0 = no limit.
+ */
+};
+
+struct yaffs_dev {
+ struct yaffs_param param;
+
+ /* Context storage. Holds extra OS specific data for this device */
+
+ void *os_context;
+ void *driver_context;
+
+ struct list_head dev_list;
+
+ /* Runtime parameters. Set up by YAFFS. */
+ int data_bytes_per_chunk;
+
+ /* Non-wide tnode stuff */
+ u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ * the tnodes are not wide enough.
+ */
+ u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+
+ /* Stuff to support wide tnodes */
+ u32 tnode_width;
+ u32 tnode_mask;
+ u32 tnode_size;
+
+ /* Stuff for figuring out file offset to chunk conversions */
+ u32 chunk_shift; /* Shift value */
+ u32 chunk_div; /* Divisor after shifting: 1 for 2^n sizes */
+ u32 chunk_mask; /* Mask to use for power-of-2 case */
+
+ int is_mounted;
+ int read_only;
+ int is_checkpointed;
+
+ /* Stuff to support block offsetting to support start block zero */
+ int internal_start_block;
+ int internal_end_block;
+ int block_offset;
+ int chunk_offset;
+
+ /* Runtime checkpointing stuff */
+ int checkpt_page_seq; /* running sequence number of checkpt pages */
+ int checkpt_byte_count;
+ int checkpt_byte_offs;
+ u8 *checkpt_buffer;
+ int checkpt_open_write;
+ int blocks_in_checkpt;
+ int checkpt_cur_chunk;
+ int checkpt_cur_block;
+ int checkpt_next_block;
+ int *checkpt_block_list;
+ int checkpt_max_blocks;
+ u32 checkpt_sum;
+ u32 checkpt_xor;
+
+ int checkpoint_blocks_required; /* Number of blocks needed to store
+ * current checkpoint set */
+
+ /* Block Info */
+ struct yaffs_block_info *block_info;
+ u8 *chunk_bits; /* bitmap of chunks in use */
+ unsigned block_info_alt:1; /* allocated using alternative alloc */
+ unsigned chunk_bits_alt:1; /* allocated using alternative alloc */
+ int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ * Must be consistent with chunks_per_block.
+ */
+
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int alloc_block_finder; /* Used to search for next allocation block */
+
+ /* Object and Tnode memory management */
+ void *allocator;
+ int n_obj;
+ int n_tnodes;
+
+ int n_hardlinks;
+
+ struct yaffs_obj_bucket obj_bucket[YAFFS_NOBJECT_BUCKETS];
+ u32 bucket_finder;
+
+ int n_free_chunks;
+
+ /* Garbage collection control */
+ u32 *gc_cleanup_list; /* objects to delete at the end of a GC. */
+ u32 n_clean_ups;
+
+ unsigned has_pending_prioritised_gc; /* We think this device might
+ have pending prioritised gcs */
+ unsigned gc_disable;
+ unsigned gc_block_finder;
+ unsigned gc_dirtiest;
+ unsigned gc_pages_in_use;
+ unsigned gc_not_done;
+ unsigned gc_block;
+ unsigned gc_chunk;
+ unsigned gc_skip;
+ struct yaffs_summary_tags *gc_sum_tags;
+
+ /* Special directories */
+ struct yaffs_obj *root_dir;
+ struct yaffs_obj *lost_n_found;
+
+ int buffered_block; /* Which block is buffered here? */
+ int doing_buffered_block_rewrite;
+
+ struct yaffs_cache *cache;
+ int cache_last_use;
+
+ /* Stuff for background deletion and unlinked files. */
+ struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
+ files live. */
+ struct yaffs_obj *del_dir; /* Directory where deleted objects are
+ sent to disappear. */
+ struct yaffs_obj *unlinked_deletion; /* Current file being
+ background deleted. */
+ int n_deleted_files; /* Count of files awaiting deletion. */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* Temporary buffer management */
+ struct yaffs_buffer temp_buffer[YAFFS_N_TEMP_BUFFERS];
+ int max_temp;
+ int temp_in_use;
+ int unmanaged_buffer_allocs;
+ int unmanaged_buffer_deallocs;
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ allocating block */
+ unsigned oldest_dirty_seq;
+ unsigned oldest_dirty_block;
+
+ /* Block refreshing */
+ int refresh_skip; /* A skip down counter.
+ * Refresh happens when this gets to zero. */
+
+ /* Dirty directory handling */
+ struct list_head dirty_dirs; /* List of dirty directories */
+
+ /* Summary */
+ int chunks_per_summary;
+ struct yaffs_summary_tags *sum_tags;
+
+ /* Statistics */
+ u32 n_page_writes;
+ u32 n_page_reads;
+ u32 n_erasures;
+ u32 n_erase_failures;
+ u32 n_gc_copies;
+ u32 all_gcs;
+ u32 passive_gc_count;
+ u32 oldest_dirty_gc_count;
+ u32 n_gc_blocks;
+ u32 bg_gcs;
+ u32 n_retried_writes;
+ u32 n_retired_blocks;
+ u32 n_ecc_fixed;
+ u32 n_ecc_unfixed;
+ u32 n_tags_ecc_fixed;
+ u32 n_tags_ecc_unfixed;
+ u32 n_deletions;
+ u32 n_unmarked_deletions;
+ u32 refresh_count;
+ u32 cache_hits;
+ u32 tags_used;
+ u32 summary_used;
+
+};
+
+/* The CheckpointDevice structure holds the device information that changes
+ * at runtime and must be preserved over unmount/mount cycles.
+ */
+struct yaffs_checkpt_dev {
+ int struct_type;
+ int n_erased_blocks;
+ int alloc_block; /* Current block being allocated off */
+ u32 alloc_page;
+ int n_free_chunks;
+
+ int n_deleted_files; /* Count of files awaiting deletion. */
+ int n_unlinked_files; /* Count of unlinked files. */
+ int n_bg_deletions; /* Count of background deletions. */
+
+ /* yaffs2 runtime stuff */
+ unsigned seq_number; /* Sequence number of currently
+ * allocating block */
+
+};
+
+struct yaffs_checkpt_validity {
+ int struct_type;
+ u32 magic;
+ u32 version;
+ u32 head;
+};
+
+struct yaffs_shadow_fixer {
+ int obj_id;
+ int shadowed_id;
+ struct yaffs_shadow_fixer *next;
+};
+
+/* Structure for doing xattr modifications */
+struct yaffs_xattr_mod {
+ int set; /* If 0 then this is a deletion */
+ const YCHAR *name;
+ const void *data;
+ int size;
+ int flags;
+ int result;
+};
+
+/*----------------------- YAFFS Functions -----------------------*/
+
+int yaffs_guts_initialise(struct yaffs_dev *dev);
+void yaffs_deinitialise(struct yaffs_dev *dev);
+
+int yaffs_get_n_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR * old_name,
+ struct yaffs_obj *new_dir, const YCHAR * new_name);
+
+int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR * name);
+int yaffs_del_obj(struct yaffs_obj *obj);
+
+int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR * name, int buffer_size);
+loff_t yaffs_get_obj_length(struct yaffs_obj *obj);
+int yaffs_get_obj_inode(struct yaffs_obj *obj);
+unsigned yaffs_get_obj_type(struct yaffs_obj *obj);
+int yaffs_get_obj_link_count(struct yaffs_obj *obj);
+
+/* File operations */
+int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
+ int n_bytes);
+int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
+
+struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid);
+
+int yaffs_flush_file(struct yaffs_obj *obj, int update_time, int data_sync);
+
+/* Flushing and checkpointing */
+void yaffs_flush_whole_cache(struct yaffs_dev *dev);
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev);
+int yaffs_checkpoint_restore(struct yaffs_dev *dev);
+
+/* Directory operations */
+struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name,
+ u32 mode, u32 uid, u32 gid);
+struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *the_dir,
+ const YCHAR *name);
+struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number);
+
+/* Link operations */
+struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR *name,
+ struct yaffs_obj *equiv_obj);
+
+struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj);
+
+/* Symlink operations */
+struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, const YCHAR *alias);
+YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj);
+
+/* Special inodes (fifos, sockets and devices) */
+struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent,
+ const YCHAR *name, u32 mode, u32 uid,
+ u32 gid, u32 rdev);
+
+int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR *name,
+ const void *value, int size, int flags);
+int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR *name, void *value,
+ int size);
+int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size);
+int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR *name);
+
+/* Special directories */
+struct yaffs_obj *yaffs_root(struct yaffs_dev *dev);
+struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev);
+
+void yaffs_handle_defered_free(struct yaffs_obj *obj);
+
+void yaffs_update_dirty_dirs(struct yaffs_dev *dev);
+
+int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency);
+
+/* Debug dump */
+int yaffs_dump_obj(struct yaffs_obj *obj);
+
+void yaffs_guts_test(struct yaffs_dev *dev);
+
+/* A few useful functions to be used within the core files*/
+void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash,
+ int lyn);
+int yaffs_check_ff(u8 *buffer, int n_bytes);
+void yaffs_handle_chunk_error(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+
+u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev);
+void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer);
+
+struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
+ int number,
+ enum yaffs_obj_type type);
+int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
+ int nand_chunk, int in_scan);
+void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR *name);
+void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj,
+ const struct yaffs_obj_hdr *oh);
+void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
+YCHAR *yaffs_clone_str(const YCHAR *str);
+void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
+ int force, int is_shrink, int shadows,
+ struct yaffs_xattr_mod *xop);
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+ int backward_scanning);
+int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
+struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
+struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id,
+ struct yaffs_tnode *passed_tn);
+
+int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
+ int n_bytes, int write_trhrough);
+void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
+void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
+
+int yaffs_count_free_chunks(struct yaffs_dev *dev);
+
+struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev,
+ struct yaffs_file_var *file_struct,
+ u32 chunk_id);
+
+u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn,
+ unsigned pos);
+
+int yaffs_is_non_empty_dir(struct yaffs_obj *obj);
+
+void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
+ int *chunk_out, u32 *offset_out);
+/*
+ * Marshalling functions to get loff_t file sizes into and out of
+ * object headers.
+ */
+void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize);
+loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh);
+loff_t yaffs_max_file_size(struct yaffs_dev *dev);
+
+
+#endif
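
The prototypes above are the core "guts" API rather than the POSIX-style yaffsfs layer. As a rough illustration of how the file operations compose — assuming a device that has already been set up and mounted via yaffs_guts_initialise(), YCHAR being plain char (non-unicode build), and purely illustrative mode/uid/gid values — creating and writing a small file looks something like this sketch (error handling trimmed):

    #include <string.h>
    #include "yaffs_guts.h"

    /* Sketch: create "log.txt" in the root directory, write a few bytes,
     * then flush the cached data and object header out to NAND.
     */
    static int example_write_small_file(struct yaffs_dev *dev)
    {
        const char msg[] = "hello yaffs";
        struct yaffs_obj *root = yaffs_root(dev);
        struct yaffs_obj *file;
        int written;

        file = yaffs_create_file(root, "log.txt", 0644, 0, 0);
        if (!file)
            return YAFFS_FAIL;

        /* offset 0, no write-through: data may sit in the short-op cache
         * until flushed (yaffs_wr_file() is expected to return the byte
         * count actually written). */
        written = yaffs_wr_file(file, (const u8 *)msg, 0, sizeof(msg) - 1, 0);

        yaffs_flush_file(file, 1, 0);   /* update_time = 1, data_sync = 0 */

        return (written == (int)sizeof(msg) - 1) ? YAFFS_OK : YAFFS_FAIL;
    }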
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.c
new file mode 100644
index 000000000..6fcba047f
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.c
@@ -0,0 +1,165 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* XXX U-BOOT XXX */
+#include <common.h>
+
+#include "yportenv.h"
+
+
+#include "yaffs_mtdif.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+#include "linux/mtd/nand.h"
+
+
+static inline void translate_spare2oob(const struct yaffs_spare *spare, u8 *oob)
+{
+ oob[0] = spare->tb0;
+ oob[1] = spare->tb1;
+ oob[2] = spare->tb2;
+ oob[3] = spare->tb3;
+ oob[4] = spare->tb4;
+ oob[5] = spare->tb5 & 0x3f;
+ oob[5] |= spare->block_status == 'Y' ? 0 : 0x80;
+ oob[5] |= spare->page_status == 0 ? 0 : 0x40;
+ oob[6] = spare->tb6;
+ oob[7] = spare->tb7;
+}
+
+static inline void translate_oob2spare(struct yaffs_spare *spare, u8 *oob)
+{
+ struct yaffs_nand_spare *nspare = (struct yaffs_nand_spare *)spare;
+ spare->tb0 = oob[0];
+ spare->tb1 = oob[1];
+ spare->tb2 = oob[2];
+ spare->tb3 = oob[3];
+ spare->tb4 = oob[4];
+ spare->tb5 = oob[5] == 0xff ? 0xff : oob[5] & 0x3f;
+ spare->block_status = oob[5] & 0x80 ? 0xff : 'Y';
+ spare->page_status = oob[5] & 0x40 ? 0xff : 0;
+ spare->ecc1[0] = spare->ecc1[1] = spare->ecc1[2] = 0xff;
+ spare->tb6 = oob[6];
+ spare->tb7 = oob[7];
+ spare->ecc2[0] = spare->ecc2[1] = spare->ecc2[2] = 0xff;
+
+ nspare->eccres1 = nspare->eccres2 = 0; /* FIXME */
+}
+
+
+int nandmtd_WriteChunkToNAND(struct yaffs_dev *dev, int chunkInNAND,
+ const u8 *data, const struct yaffs_spare *spare)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->driver_context);
+ struct mtd_oob_ops ops;
+ size_t dummy;
+ int retval = 0;
+ loff_t addr = ((loff_t) chunkInNAND) * dev->data_bytes_per_chunk;
+ u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+ retval = mtd_write(mtd, addr, dev->data_bytes_per_chunk,
+ &dummy, data);
+ else if (spare) {
+ if (dev->param.use_nand_ecc) {
+ translate_spare2oob(spare, spareAsBytes);
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = 8; /* temp hack */
+ } else {
+ ops.mode = MTD_OPS_RAW;
+ ops.ooblen = YAFFS_BYTES_PER_SPARE;
+ }
+ ops.len = data ? dev->data_bytes_per_chunk : ops.ooblen;
+ ops.datbuf = (u8 *)data;
+ ops.ooboffs = 0;
+ ops.oobbuf = spareAsBytes;
+ retval = mtd_write_oob(mtd, addr, &ops);
+ }
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_ReadChunkFromNAND(struct yaffs_dev *dev, int chunkInNAND, u8 *data,
+ struct yaffs_spare *spare)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->driver_context);
+ struct mtd_oob_ops ops;
+ size_t dummy;
+ int retval = 0;
+
+ loff_t addr = ((loff_t) chunkInNAND) * dev->data_bytes_per_chunk;
+ u8 spareAsBytes[8]; /* OOB */
+
+ if (data && !spare)
+ retval = mtd_read(mtd, addr, dev->data_bytes_per_chunk,
+ &dummy, data);
+ else if (spare) {
+ if (dev->param.use_nand_ecc) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = 8; /* temp hack */
+ } else {
+ ops.mode = MTD_OPS_RAW;
+ ops.ooblen = YAFFS_BYTES_PER_SPARE;
+ }
+ ops.len = data ? dev->data_bytes_per_chunk : ops.ooblen;
+ ops.datbuf = data;
+ ops.ooboffs = 0;
+ ops.oobbuf = spareAsBytes;
+ retval = mtd_read_oob(mtd, addr, &ops);
+ if (dev->param.use_nand_ecc)
+ translate_oob2spare(spare, spareAsBytes);
+ }
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_EraseBlockInNAND(struct yaffs_dev *dev, int blockNumber)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->driver_context);
+ __u32 addr =
+ ((loff_t) blockNumber) * dev->data_bytes_per_chunk
+ * dev->param.chunks_per_block;
+ struct erase_info ei;
+ int retval = 0;
+
+ ei.mtd = mtd;
+ ei.addr = addr;
+ ei.len = dev->data_bytes_per_chunk * dev->param.chunks_per_block;
+ ei.time = 1000;
+ ei.retries = 2;
+ ei.callback = NULL;
+ ei.priv = (u_long) dev;
+
+ /* Todo finish off the ei if required */
+
+
+ retval = mtd_erase(mtd, &ei);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd_InitialiseNAND(struct yaffs_dev *dev)
+{
+ return YAFFS_OK;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.h
new file mode 100644
index 000000000..1a125823b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif.h
@@ -0,0 +1,27 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF_H__
+#define __YAFFS_MTDIF_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd_WriteChunkToNAND(struct yaffs_dev *dev, int chunkInNAND,
+ const u8 *data, const struct yaffs_spare *spare);
+int nandmtd_ReadChunkFromNAND(struct yaffs_dev *dev, int chunkInNAND, u8 *data,
+ struct yaffs_spare *spare);
+int nandmtd_EraseBlockInNAND(struct yaffs_dev *dev, int blockNumber);
+int nandmtd_InitialiseNAND(struct yaffs_dev *dev);
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.c
new file mode 100644
index 000000000..234cb706d
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.c
@@ -0,0 +1,232 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* mtd interface for YAFFS2 */
+
+/* XXX U-BOOT XXX */
+#include <common.h>
+#include "asm/errno.h"
+
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#include "yaffs_mtdif2.h"
+
+#include "linux/mtd/mtd.h"
+#include "linux/types.h"
+#include "linux/time.h"
+
+#include "yaffs_trace.h"
+#include "yaffs_packedtags2.h"
+
+#define yaffs_dev_to_mtd(dev) ((struct mtd_info *)((dev)->driver_context))
+#define yaffs_dev_to_lc(dev) ((struct yaffs_linux_context *)((dev)->os_context))
+
+
+/* NB For use with inband tags....
+ * We assume that the data buffer is of size total_bytes_per_chunk so
+ * that we can also use it to load the tags.
+ */
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ struct mtd_oob_ops ops;
+
+ int retval = 0;
+ loff_t addr;
+
+ struct yaffs_packed_tags2 pt;
+
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_write_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+ if (!data || !tags)
+ BUG();
+ else if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)(data +
+ dev->
+ data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ }
+
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = (dev->param.inband_tags) ? 0 : packed_tags_size;
+ ops.len = dev->param.total_bytes_per_chunk;
+ ops.ooboffs = 0;
+ ops.datbuf = (u8 *) data;
+ ops.oobbuf = (dev->param.inband_tags) ? NULL : packed_tags_ptr;
+ retval = mtd_write_oob(mtd, addr, &ops);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags)
+{
+ struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
+ u8 local_spare[128];
+ struct mtd_oob_ops ops;
+ size_t dummy;
+ int retval = 0;
+ int local_data = 0;
+ struct yaffs_packed_tags2 pt;
+ loff_t addr = ((loff_t) nand_chunk) * dev->param.total_bytes_per_chunk;
+ int packed_tags_size =
+ dev->param.no_tags_ecc ? sizeof(pt.t) : sizeof(pt);
+ void *packed_tags_ptr =
+ dev->param.no_tags_ecc ? (void *)&pt.t : (void *)&pt;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_read_chunk_tags chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (dev->param.inband_tags) {
+
+ if (!data) {
+ local_data = 1;
+ data = yaffs_get_temp_buffer(dev);
+ }
+
+ }
+
+ if (dev->param.inband_tags || (data && !tags))
+ retval = mtd_read(mtd, addr, dev->param.total_bytes_per_chunk,
+ &dummy, data);
+ else if (tags) {
+ ops.mode = MTD_OPS_AUTO_OOB;
+ ops.ooblen = packed_tags_size;
+ ops.len = data ? dev->data_bytes_per_chunk : packed_tags_size;
+ ops.ooboffs = 0;
+ ops.datbuf = data;
+ ops.oobbuf = local_spare;
+ retval = mtd_read_oob(mtd, addr, &ops);
+ }
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp =
+ (struct yaffs_packed_tags2_tags_only *)
+ &data[dev->data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else {
+ if (tags) {
+ memcpy(packed_tags_ptr,
+ local_spare,
+ packed_tags_size);
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+ }
+
+ if (local_data)
+ yaffs_release_temp_buffer(dev, data);
+
+ if (tags && retval == -EBADMSG
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ dev->n_ecc_unfixed++;
+ }
+ if (tags && retval == -EUCLEAN
+ && tags->ecc_result == YAFFS_ECC_RESULT_NO_ERROR) {
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ dev->n_ecc_fixed++;
+ }
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
+
+
+int nandmtd2_MarkNANDBlockBad(struct yaffs_dev *dev, int blockNo)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->driver_context);
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_MarkNANDBlockBad %d", blockNo);
+
+ retval =
+ mtd_block_markbad(mtd,
+ blockNo * dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+
+}
+
+int nandmtd2_QueryNANDBlock(struct yaffs_dev *dev, int blockNo,
+ enum yaffs_block_state *state, u32 *sequenceNumber)
+{
+ struct mtd_info *mtd = (struct mtd_info *)(dev->driver_context);
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_MTD, "nandmtd2_QueryNANDBlock %d", blockNo);
+ retval =
+ mtd_block_isbad(mtd,
+ blockNo * dev->param.chunks_per_block *
+ dev->data_bytes_per_chunk);
+
+ if (retval) {
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad");
+
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ *sequenceNumber = 0;
+ } else {
+ struct yaffs_ext_tags t;
+ nandmtd2_read_chunk_tags(dev,
+ blockNo *
+ dev->param.chunks_per_block, NULL,
+ &t);
+
+ if (t.chunk_used) {
+ *sequenceNumber = t.seq_number;
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ } else {
+ *sequenceNumber = 0;
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ }
+ }
+ yaffs_trace(YAFFS_TRACE_MTD, "block is bad seq %d state %d",
+ *sequenceNumber, *state);
+
+ if (retval == 0)
+ return YAFFS_OK;
+ else
+ return YAFFS_FAIL;
+}
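
The "NB For use with inband tags" note above relies on the caller's data buffer being total_bytes_per_chunk long so the packed tags can ride at the end of the same NAND page. A minimal sketch of that layout, assuming (as the code above does) that yaffs_packed_tags2_tags_only is the structure appended, and that the core sets data_bytes_per_chunk to total_bytes_per_chunk minus that structure's size when inband tags are enabled:

    #include "yaffs_guts.h"
    #include "yaffs_packedtags2.h"

    /* Sketch: with inband tags one page carries both file data and tags,
     * so the usable data area shrinks by the size of the tags-only struct.
     */
    static void example_inband_layout(struct yaffs_dev *dev, u8 *chunk_buf,
                                      const struct yaffs_ext_tags *tags)
    {
        struct yaffs_packed_tags2_tags_only *pt2tp =
            (struct yaffs_packed_tags2_tags_only *)
                (chunk_buf + dev->data_bytes_per_chunk);

        yaffs_pack_tags2_tags_only(pt2tp, tags);

        /* chunk_buf[0 .. data_bytes_per_chunk - 1]     : file data
         * chunk_buf[data_bytes_per_chunk .. total - 1] : packed tags
         */
    }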
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.h
new file mode 100644
index 000000000..62be17363
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_mtdif2.h
@@ -0,0 +1,30 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_MTDIF2_H__
+#define __YAFFS_MTDIF2_H__
+
+#include "yaffs_guts.h"
+
+int nandmtd2_write_chunk_tags(struct yaffs_dev *dev, int chunkInNAND,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags);
+int nandmtd2_read_chunk_tags(struct yaffs_dev *dev, int chunkInNAND,
+ u8 *data, struct yaffs_ext_tags *tags);
+int nandmtd2_MarkNANDBlockBad(struct yaffs_dev *dev, int blockNo);
+int nandmtd2_QueryNANDBlock(struct yaffs_dev *dev, int blockNo,
+ enum yaffs_block_state *state, u32 *sequenceNumber);
+
+#endif
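
Together with the yaffs1 glue in yaffs_mtdif.c, these routines match the hooks declared in struct yaffs_param (write_chunk_tags_fn, read_chunk_tags_fn, bad_block_fn, query_block_fn, erase_fn, initialise_flash_fn). The sketch below shows how a board port might wire them up for a yaffs2-mode device before calling yaffs_guts_initialise(); the geometry numbers and the "nand0" name are illustrative only (a real caller would take them from the underlying MTD partition), and YCHAR is taken to be plain char:

    #include <string.h>
    #include "linux/mtd/mtd.h"
    #include "yaffs_guts.h"
    #include "yaffs_mtdif.h"
    #include "yaffs_mtdif2.h"

    /* Sketch: configure a yaffs2 device on top of an MTD partition. */
    static int example_setup_yaffs2(struct yaffs_dev *dev, struct mtd_info *mtd)
    {
        struct yaffs_param *p = &dev->param;

        memset(dev, 0, sizeof(*dev));   /* unused/default values must be 0 */

        p->name = "nand0";
        p->total_bytes_per_chunk = 2048;    /* NAND page size */
        p->spare_bytes_per_chunk = 64;      /* OOB bytes per page */
        p->chunks_per_block = 64;           /* pages per erase block */
        p->start_block = 0;
        p->end_block = 1023;
        p->n_reserved_blocks = 5;
        p->n_caches = 10;
        p->is_yaffs2 = 1;
        p->inband_tags = 0;

        /* Mandatory NAND access functions for yaffs2 mode */
        p->write_chunk_tags_fn = nandmtd2_write_chunk_tags;
        p->read_chunk_tags_fn = nandmtd2_read_chunk_tags;
        p->bad_block_fn = nandmtd2_MarkNANDBlockBad;
        p->query_block_fn = nandmtd2_QueryNANDBlock;
        p->erase_fn = nandmtd_EraseBlockInNAND;
        p->initialise_flash_fn = nandmtd_InitialiseNAND;

        dev->driver_context = mtd;      /* both glue layers cast this back */

        return yaffs_guts_initialise(dev);
    }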
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.c
new file mode 100644
index 000000000..a20a3e47c
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.c
@@ -0,0 +1,208 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This simple implementation of a name-value store assumes a small number of
+ * values and fits into a small finite buffer.
+ *
+ * Each attribute is stored as a record:
+ * sizeof(int) bytes record size.
+ * yaffs_strnlen+1 bytes name null terminated.
+ * nbytes value.
+ * ----------
+ * total size stored in record size
+ *
+ * This code has not been tested with unicode yet.
+ */
+
+#include "yaffs_nameval.h"
+
+#include "yportenv.h"
+
+static int nval_find(const char *xb, int xb_size, const YCHAR *name,
+ int *exist_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ if (!yaffs_strncmp((YCHAR *) (xb + pos + sizeof(int)),
+ name, size)) {
+ if (exist_size)
+ *exist_size = size;
+ return pos;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ if (exist_size)
+ *exist_size = 0;
+ return -ENODATA;
+}
+
+static int nval_used(const char *xb, int xb_size)
+{
+ int pos = 0;
+ int size;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return pos;
+}
+
+int nval_del(char *xb, int xb_size, const YCHAR *name)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos < 0 || pos >= xb_size)
+ return -ENODATA;
+
+ /* Find size, shift rest over this record,
+ * then zero out the rest of buffer */
+ memcpy(&size, xb + pos, sizeof(int));
+ memcpy(xb + pos, xb + pos + size, xb_size - (pos + size));
+ memset(xb + (xb_size - size), 0, size);
+ return 0;
+}
+
+int nval_set(char *xb, int xb_size, const YCHAR *name, const char *buf,
+ int bsize, int flags)
+{
+ int pos;
+ int namelen = yaffs_strnlen(name, xb_size);
+ int reclen;
+ int size_exist = 0;
+ int space;
+ int start;
+
+ pos = nval_find(xb, xb_size, name, &size_exist);
+
+ if (flags & XATTR_CREATE && pos >= 0)
+ return -EEXIST;
+ if (flags & XATTR_REPLACE && pos < 0)
+ return -ENODATA;
+
+ start = nval_used(xb, xb_size);
+ space = xb_size - start + size_exist;
+
+ reclen = (sizeof(int) + namelen + 1 + bsize);
+
+ if (reclen > space)
+ return -ENOSPC;
+
+ if (pos >= 0) {
+ nval_del(xb, xb_size, name);
+ start = nval_used(xb, xb_size);
+ }
+
+ pos = start;
+
+ memcpy(xb + pos, &reclen, sizeof(int));
+ pos += sizeof(int);
+ yaffs_strncpy((YCHAR *) (xb + pos), name, reclen);
+ pos += (namelen + 1);
+ memcpy(xb + pos, buf, bsize);
+ return 0;
+}
+
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize)
+{
+ int pos = nval_find(xb, xb_size, name, NULL);
+ int size;
+
+ if (pos >= 0 && pos < xb_size) {
+
+ memcpy(&size, xb + pos, sizeof(int));
+ pos += sizeof(int); /* advance past record length */
+ size -= sizeof(int);
+
+ /* Advance over name string */
+ while (xb[pos] && size > 0 && pos < xb_size) {
+ pos++;
+ size--;
+ }
+ /* Advance over NUL */
+ pos++;
+ size--;
+
+ /* If bsize is zero then this is a size query.
+ * Return the size, but don't copy.
+ */
+ if (!bsize)
+ return size;
+
+ if (size <= bsize) {
+ memcpy(buf, xb + pos, size);
+ return size;
+ }
+ }
+ if (pos >= 0)
+ return -ERANGE;
+
+ return -ENODATA;
+}
+
+int nval_list(const char *xb, int xb_size, char *buf, int bsize)
+{
+ int pos = 0;
+ int size;
+ int name_len;
+ int ncopied = 0;
+ int filled = 0;
+
+ memcpy(&size, xb + pos, sizeof(int));
+ while (size > sizeof(int) &&
+ size <= xb_size &&
+ (pos + size) < xb_size &&
+ !filled) {
+ pos += sizeof(int);
+ size -= sizeof(int);
+ name_len = yaffs_strnlen((YCHAR *) (xb + pos), size);
+ if (ncopied + name_len + 1 < bsize) {
+ memcpy(buf, xb + pos, name_len * sizeof(YCHAR));
+ buf += name_len;
+ *buf = '\0';
+ buf++;
+ if (sizeof(YCHAR) > 1) {
+ *buf = '\0';
+ buf++;
+ }
+ ncopied += (name_len + 1);
+ } else {
+ filled = 1;
+ }
+ pos += size;
+ if (pos < xb_size - sizeof(int))
+ memcpy(&size, xb + pos, sizeof(int));
+ else
+ size = 0;
+ }
+ return ncopied;
+}
+
+int nval_hasvalues(const char *xb, int xb_size)
+{
+ return nval_used(xb, xb_size) > 0;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.h
new file mode 100644
index 000000000..951e64f87
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nameval.h
@@ -0,0 +1,28 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __NAMEVAL_H__
+#define __NAMEVAL_H__
+
+#include "yportenv.h"
+
+int nval_del(char *xb, int xb_size, const YCHAR * name);
+int nval_set(char *xb, int xb_size, const YCHAR * name, const char *buf,
+ int bsize, int flags);
+int nval_get(const char *xb, int xb_size, const YCHAR * name, char *buf,
+ int bsize);
+int nval_list(const char *xb, int xb_size, char *buf, int bsize);
+int nval_hasvalues(const char *xb, int xb_size);
+#endif
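
The comment at the top of yaffs_nameval.c describes the record layout these functions maintain: a record length, the NUL-terminated name, then the value bytes. A small usage sketch against the prototypes above, assuming YCHAR is plain char and using a caller-owned buffer in place of the per-object xattr area:

    #include <string.h>
    #include "yaffs_nameval.h"

    /* Sketch: store one attribute, query its size, read it back, delete it. */
    static void example_nameval(void)
    {
        char xb[256];
        char out[32];
        int n;

        memset(xb, 0, sizeof(xb));  /* the nval_* helpers expect a zeroed buffer */

        /* Record written: [int reclen]["user.tag\0"][5 value bytes] */
        nval_set(xb, sizeof(xb), "user.tag", "hello", 5, 0);

        n = nval_get(xb, sizeof(xb), "user.tag", NULL, 0);  /* size query: 5 */
        n = nval_get(xb, sizeof(xb), "user.tag", out, sizeof(out));
        /* n == 5 and out[] now holds "hello" (no terminating NUL added) */

        nval_del(xb, sizeof(xb), "user.tag");
    }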
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nand.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_nand.c
new file mode 100644
index 000000000..165d01004
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nand.c
@@ -0,0 +1,120 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_nand.h"
+#include "yaffs_tagscompat.h"
+
+#include "yaffs_getblockinfo.h"
+#include "yaffs_summary.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ struct yaffs_ext_tags local_tags;
+ int flash_chunk = nand_chunk - dev->chunk_offset;
+
+ dev->n_page_reads++;
+
+ /* If there are no tags provided use local tags. */
+ if (!tags)
+ tags = &local_tags;
+
+ if (dev->param.read_chunk_tags_fn)
+ result =
+ dev->param.read_chunk_tags_fn(dev, flash_chunk, buffer,
+ tags);
+ else
+ result = yaffs_tags_compat_rd(dev,
+ flash_chunk, buffer, tags);
+ if (tags && tags->ecc_result > YAFFS_ECC_RESULT_NO_ERROR) {
+
+ struct yaffs_block_info *bi;
+ bi = yaffs_get_block_info(dev,
+ nand_chunk /
+ dev->param.chunks_per_block);
+ yaffs_handle_chunk_error(dev, bi);
+ }
+ return result;
+}
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags)
+{
+ int result;
+ int flash_chunk = nand_chunk - dev->chunk_offset;
+
+ dev->n_page_writes++;
+
+ if (tags) {
+ tags->seq_number = dev->seq_number;
+ tags->chunk_used = 1;
+ yaffs_trace(YAFFS_TRACE_WRITE,
+ "Writing chunk %d tags %d %d",
+ nand_chunk, tags->obj_id, tags->chunk_id);
+ } else {
+ yaffs_trace(YAFFS_TRACE_ERROR, "Writing with no tags");
+ BUG();
+ return YAFFS_FAIL;
+ }
+
+ if (dev->param.write_chunk_tags_fn)
+ result = dev->param.write_chunk_tags_fn(dev, flash_chunk,
+ buffer, tags);
+ else
+ result = yaffs_tags_compat_wr(dev, flash_chunk, buffer, tags);
+
+ yaffs_summary_add(dev, tags, nand_chunk);
+
+ return result;
+}
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no)
+{
+ block_no -= dev->block_offset;
+ if (dev->param.bad_block_fn)
+ return dev->param.bad_block_fn(dev, block_no);
+
+ return yaffs_tags_compat_mark_bad(dev, block_no);
+}
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ block_no -= dev->block_offset;
+ if (dev->param.query_block_fn)
+ return dev->param.query_block_fn(dev, block_no, state,
+ seq_number);
+
+ return yaffs_tags_compat_query_block(dev, block_no, state, seq_number);
+}
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block)
+{
+ int result;
+
+ flash_block -= dev->block_offset;
+ dev->n_erasures++;
+ result = dev->param.erase_fn(dev, flash_block);
+ return result;
+}
+
+int yaffs_init_nand(struct yaffs_dev *dev)
+{
+ if (dev->param.initialise_flash_fn)
+ return dev->param.initialise_flash_fn(dev);
+ return YAFFS_OK;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nand.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_nand.h
new file mode 100644
index 000000000..71346627f
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nand.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_NAND_H__
+#define __YAFFS_NAND_H__
+#include "yaffs_guts.h"
+
+int yaffs_rd_chunk_tags_nand(struct yaffs_dev *dev, int nand_chunk,
+ u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_wr_chunk_tags_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *buffer, struct yaffs_ext_tags *tags);
+
+int yaffs_mark_bad(struct yaffs_dev *dev, int block_no);
+
+int yaffs_query_init_block_state(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ unsigned *seq_number);
+
+int yaffs_erase_block(struct yaffs_dev *dev, int flash_block);
+
+int yaffs_init_nand(struct yaffs_dev *dev);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nandemul2k.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_nandemul2k.h
new file mode 100644
index 000000000..cb0c4e64d
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nandemul2k.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* Interface to emulated NAND functions (2k page size) */
+
+#ifndef __YAFFS_NANDEMUL2K_H__
+#define __YAFFS_NANDEMUL2K_H__
+
+#include "yaffs_guts.h"
+
+int nandemul2k_WriteChunkWithTagsToNAND(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ const struct yaffs_ext_tags *tags);
+int nandemul2k_ReadChunkWithTagsFromNAND(struct yaffs_dev *dev,
+ int nand_chunk, u8 *data,
+ struct yaffs_ext_tags *tags);
+int nandemul2k_MarkNANDBlockBad(struct yaffs_dev *dev, int block_no);
+int nandemul2k_QueryNANDBlock(struct yaffs_dev *dev, int block_no,
+ enum yaffs_block_state *state, u32 *seq_number);
+int nandemul2k_EraseBlockInNAND(struct yaffs_dev *dev,
+ int flash_block);
+int nandemul2k_InitialiseNAND(struct yaffs_dev *dev);
+int nandemul2k_GetBytesPerChunk(void);
+int nandemul2k_GetChunksPerBlock(void);
+int nandemul2k_GetNumberOfBlocks(void);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.c
new file mode 100644
index 000000000..79b00ab3b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.c
@@ -0,0 +1,251 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yportenv.h"
+#include "yaffs_guts.h"
+
+
+#include "yaffs_nandif.h"
+#include "yaffs_packedtags2.h"
+
+#include "yramsim.h"
+
+#include "yaffs_trace.h"
+#include "yaffsfs.h"
+
+
+/* NB For use with inband tags....
+ * We assume that the data buffer is of size total_bytes_per_chunk so that
+ * we can also use it to load the tags.
+ */
+int ynandif_WriteChunkWithTagsToNAND(struct yaffs_dev *dev, int nand_chunk,
+ const u8 *data,
+ const struct yaffs_ext_tags *tags)
+{
+
+ int retval = 0;
+ struct yaffs_packed_tags2 pt;
+ void *spare;
+ unsigned spareSize = 0;
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_WriteChunkWithTagsToNAND chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+
+ /* For yaffs2 writing there must be both data and tags.
+ * If we're using inband tags, then the tags are stuffed into
+ * the end of the data buffer.
+ */
+
+ if (dev->param.inband_tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+
+ pt2tp = (struct yaffs_packed_tags2_tags_only *)
+ (data + dev->data_bytes_per_chunk);
+ yaffs_pack_tags2_tags_only(pt2tp, tags);
+ spare = NULL;
+ spareSize = 0;
+ } else {
+ yaffs_pack_tags2(&pt, tags, !dev->param.no_tags_ecc);
+ spare = &pt;
+ spareSize = sizeof(struct yaffs_packed_tags2);
+ }
+
+ retval = geometry->writeChunk(dev, nand_chunk,
+ data, dev->param.total_bytes_per_chunk,
+ spare, spareSize);
+
+ return retval;
+}
+
+int ynandif_ReadChunkWithTagsFromNAND(struct yaffs_dev *dev, int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags)
+{
+ struct yaffs_packed_tags2 pt;
+ int localData = 0;
+ void *spare = NULL;
+ unsigned spareSize;
+ int retval = 0;
+ int eccStatus; /* 0 = ok, 1 = fixed, -1 = unfixed */
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "nandmtd2_ReadChunkWithTagsFromNAND chunk %d data %p tags %p",
+ nand_chunk, data, tags);
+
+ if (!tags) {
+ spare = NULL;
+ spareSize = 0;
+ } else if (dev->param.inband_tags) {
+
+ if (!data) {
+ localData = 1;
+ data = yaffs_get_temp_buffer(dev);
+ }
+ spare = NULL;
+ spareSize = 0;
+ } else {
+ spare = &pt;
+ spareSize = sizeof(struct yaffs_packed_tags2);
+ }
+
+ retval = geometry->readChunk(dev, nand_chunk,
+ data,
+ data ? dev->param.total_bytes_per_chunk : 0,
+ spare, spareSize,
+ &eccStatus);
+
+ if (dev->param.inband_tags) {
+ if (tags) {
+ struct yaffs_packed_tags2_tags_only *pt2tp;
+ pt2tp = (struct yaffs_packed_tags2_tags_only *)
+ &data[dev->data_bytes_per_chunk];
+ yaffs_unpack_tags2_tags_only(tags, pt2tp);
+ }
+ } else {
+ if (tags)
+ yaffs_unpack_tags2(tags, &pt, !dev->param.no_tags_ecc);
+ }
+
+ if (tags && tags->chunk_used) {
+ if (eccStatus < 0 ||
+ tags->ecc_result == YAFFS_ECC_RESULT_UNFIXED)
+ tags->ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (eccStatus > 0 ||
+ tags->ecc_result == YAFFS_ECC_RESULT_FIXED)
+ tags->ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ tags->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+
+ if (localData)
+ yaffs_release_temp_buffer(dev, data);
+
+ return retval;
+}
+
+int ynandif_MarkNANDBlockBad(struct yaffs_dev *dev, int blockId)
+{
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ return geometry->markBlockBad(dev, blockId);
+}
+
+int ynandif_EraseBlockInNAND(struct yaffs_dev *dev, int blockId)
+{
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ return geometry->eraseBlock(dev, blockId);
+
+}
+
+
+static int ynandif_IsBlockOk(struct yaffs_dev *dev, int blockId)
+{
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ return geometry->checkBlockOk(dev, blockId);
+}
+
+int ynandif_QueryNANDBlock(struct yaffs_dev *dev, int blockId,
+ enum yaffs_block_state *state, u32 *seq_number)
+{
+ unsigned chunkNo;
+ struct yaffs_ext_tags tags;
+
+ *seq_number = 0;
+
+ chunkNo = blockId * dev->param.chunks_per_block;
+
+ if (!ynandif_IsBlockOk(dev, blockId)) {
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ } else {
+ ynandif_ReadChunkWithTagsFromNAND(dev, chunkNo, NULL, &tags);
+
+ if (!tags.chunk_used) {
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ } else {
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+ *seq_number = tags.seq_number;
+ }
+ }
+
+ return YAFFS_OK;
+}
+
+
+int ynandif_InitialiseNAND(struct yaffs_dev *dev)
+{
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ geometry->initialise(dev);
+
+ return YAFFS_OK;
+}
+
+int ynandif_Deinitialise_flash_fn(struct yaffs_dev *dev)
+{
+ struct ynandif_Geometry *geometry = (struct ynandif_Geometry *)(dev->driver_context);
+
+ geometry->deinitialise(dev);
+
+ return YAFFS_OK;
+}
+
+
+struct yaffs_dev *
+ yaffs_add_dev_from_geometry(const YCHAR *name,
+ const struct ynandif_Geometry *geometry)
+{
+ YCHAR *clonedName = malloc(sizeof(YCHAR) *
+ (strnlen(name, YAFFS_MAX_NAME_LENGTH)+1));
+ struct yaffs_dev *dev = malloc(sizeof(struct yaffs_dev));
+ struct yaffs_param *param;
+
+ if (dev && clonedName) {
+ memset(dev, 0, sizeof(struct yaffs_dev));
+ strcpy(clonedName, name);
+
+ param = &dev->param;
+
+ param->name = clonedName;
+ param->write_chunk_tags_fn = ynandif_WriteChunkWithTagsToNAND;
+ param->read_chunk_tags_fn = ynandif_ReadChunkWithTagsFromNAND;
+ param->erase_fn = ynandif_EraseBlockInNAND;
+ param->initialise_flash_fn = ynandif_InitialiseNAND;
+ param->query_block_fn = ynandif_QueryNANDBlock;
+ param->bad_block_fn = ynandif_MarkNANDBlockBad;
+ param->n_caches = 20;
+ param->start_block = geometry->start_block;
+ param->end_block = geometry->end_block;
+ param->total_bytes_per_chunk = geometry->dataSize;
+ param->spare_bytes_per_chunk = geometry->spareSize;
+ param->inband_tags = geometry->inband_tags;
+ param->chunks_per_block = geometry->pagesPerBlock;
+ param->use_nand_ecc = geometry->hasECC;
+ param->is_yaffs2 = geometry->useYaffs2;
+ param->n_reserved_blocks = 5;
+ dev->driver_context = (void *)geometry;
+
+ yaffs_add_device(dev);
+
+ return dev;
+ }
+
+ free(dev);
+ free(clonedName);
+
+ return NULL;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.h
new file mode 100644
index 000000000..e780f7f3b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_nandif.h
@@ -0,0 +1,65 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+
+#ifndef __YNANDIF_H__
+#define __YNANDIF_H__
+
+#include "yaffs_guts.h"
+
+struct ynandif_Geometry {
+ unsigned start_block;
+ unsigned end_block;
+ unsigned dataSize;
+ unsigned spareSize;
+ unsigned pagesPerBlock;
+ unsigned hasECC;
+ unsigned inband_tags;
+ unsigned useYaffs2;
+
+ int (*initialise)(struct yaffs_dev *dev);
+ int (*deinitialise)(struct yaffs_dev *dev);
+
+ int (*readChunk) (struct yaffs_dev *dev,
+ unsigned pageId,
+ unsigned char *data,
+ unsigned dataLength,
+ unsigned char *spare,
+ unsigned spareLength,
+ int *eccStatus);
+ /* ECC status is set to 0 for OK, 1 for fixed, -1 for unfixed. */
+
+ int (*writeChunk)(struct yaffs_dev *dev,
+ unsigned pageId,
+ const unsigned char *data,
+ unsigned dataLength,
+ const unsigned char *spare,
+ unsigned spareLength);
+
+ int (*eraseBlock)(struct yaffs_dev *dev, unsigned blockId);
+
+ int (*checkBlockOk)(struct yaffs_dev *dev, unsigned blockId);
+ int (*markBlockBad)(struct yaffs_dev *dev, unsigned blockId);
+
+ void *privateData;
+
+};
+
+struct yaffs_dev *
+ yaffs_add_dev_from_geometry(const YCHAR *name,
+ const struct ynandif_Geometry *geometry);
+
+
+#endif
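
yaffs_add_dev_from_geometry() (implemented in yaffs_nandif.c above) copies a filled-in ynandif_Geometry into a freshly allocated yaffs_dev and installs the ynandif_* callbacks. A sketch of the caller side follows; the geometry numbers are made up, the my_* functions are hypothetical driver hooks that are not part of this patch, and YCHAR is again taken to be plain char:

    #include "yaffs_nandif.h"

    /* Hypothetical low-level driver hooks; a real port supplies these. */
    extern int my_init(struct yaffs_dev *dev);
    extern int my_deinit(struct yaffs_dev *dev);
    extern int my_read_chunk(struct yaffs_dev *dev, unsigned pageId,
                             unsigned char *data, unsigned dataLength,
                             unsigned char *spare, unsigned spareLength,
                             int *eccStatus);
    extern int my_write_chunk(struct yaffs_dev *dev, unsigned pageId,
                              const unsigned char *data, unsigned dataLength,
                              const unsigned char *spare, unsigned spareLength);
    extern int my_erase_block(struct yaffs_dev *dev, unsigned blockId);
    extern int my_check_block(struct yaffs_dev *dev, unsigned blockId);
    extern int my_mark_bad(struct yaffs_dev *dev, unsigned blockId);

    static struct ynandif_Geometry my_geometry = {
        .start_block    = 0,
        .end_block      = 511,
        .dataSize       = 2048,     /* data bytes per page */
        .spareSize      = 64,       /* spare bytes per page */
        .pagesPerBlock  = 64,
        .hasECC         = 1,
        .inband_tags    = 0,
        .useYaffs2      = 1,
        .initialise     = my_init,
        .deinitialise   = my_deinit,
        .readChunk      = my_read_chunk,
        .writeChunk     = my_write_chunk,
        .eraseBlock     = my_erase_block,
        .checkBlockOk   = my_check_block,
        .markBlockBad   = my_mark_bad,
    };

    /* Register the device under a name the direct interface can refer to. */
    struct yaffs_dev *example_register(void)
    {
        return yaffs_add_dev_from_geometry("flash", &my_geometry);
    }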
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_osglue.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_osglue.h
new file mode 100644
index 000000000..f629a4b59
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_osglue.h
@@ -0,0 +1,41 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Header file for using yaffs in an application via
+ * a direct interface.
+ */
+
+
+#ifndef __YAFFS_OSGLUE_H__
+#define __YAFFS_OSGLUE_H__
+
+
+#include "yportenv.h"
+
+void yaffsfs_Lock(void);
+void yaffsfs_Unlock(void);
+
+u32 yaffsfs_CurrentTime(void);
+
+void yaffsfs_SetError(int err);
+
+void *yaffsfs_malloc(size_t size);
+void yaffsfs_free(void *ptr);
+
+void yaffsfs_OSInitialisation(void);
+
+
+#endif
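
These are the hooks the core and the direct interface expect the hosting environment to provide. Below is a minimal single-threaded sketch, roughly what a bare-metal or bootloader-style port might use; the no-op locking and the zero timestamp are assumptions about that environment, not requirements of the API:

    #include <stdlib.h>
    #include "yaffs_osglue.h"

    static int yaffsfs_lastError;

    /* Single-threaded environment: locking can be a no-op. */
    void yaffsfs_Lock(void) { }
    void yaffsfs_Unlock(void) { }

    /* No RTC assumed; every file simply gets timestamp 0. */
    u32 yaffsfs_CurrentTime(void)
    {
        return 0;
    }

    void yaffsfs_SetError(int err)
    {
        yaffsfs_lastError = err;    /* stashed for later inspection */
    }

    void *yaffsfs_malloc(size_t size)
    {
        return malloc(size);
    }

    void yaffsfs_free(void *ptr)
    {
        free(ptr);
    }

    void yaffsfs_OSInitialisation(void)
    {
        /* Nothing to set up in this environment. */
    }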
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.c
new file mode 100644
index 000000000..dd9a331d8
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.c
@@ -0,0 +1,56 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags1.h"
+#include "yportenv.h"
+
+static const u8 all_ff[20] = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t)
+{
+ pt->chunk_id = t->chunk_id;
+ pt->serial_number = t->serial_number;
+ pt->n_bytes = t->n_bytes;
+ pt->obj_id = t->obj_id;
+ pt->ecc = 0;
+ pt->deleted = (t->is_deleted) ? 0 : 1;
+ pt->unused_stuff = 0;
+ pt->should_be_ff = 0xffffffff;
+}
+
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt)
+{
+
+ if (memcmp(all_ff, pt, sizeof(struct yaffs_packed_tags1))) {
+ t->block_bad = 0;
+ if (pt->should_be_ff != 0xffffffff)
+ t->block_bad = 1;
+ t->chunk_used = 1;
+ t->obj_id = pt->obj_id;
+ t->chunk_id = pt->chunk_id;
+ t->n_bytes = pt->n_bytes;
+ t->ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ t->is_deleted = (pt->deleted) ? 0 : 1;
+ t->serial_number = pt->serial_number;
+ } else {
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+ }
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.h
new file mode 100644
index 000000000..b80f0a5b1
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags1.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS1 tags, not YAFFS2 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS1_H__
+#define __YAFFS_PACKEDTAGS1_H__
+
+#include "yaffs_guts.h"
+
+struct yaffs_packed_tags1 {
+ unsigned chunk_id:20;
+ unsigned serial_number:2;
+ unsigned n_bytes:10;
+ unsigned obj_id:18;
+ unsigned ecc:12;
+ unsigned deleted:1;
+ unsigned unused_stuff:1;
+ unsigned should_be_ff;
+
+};
+
+void yaffs_pack_tags1(struct yaffs_packed_tags1 *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags1(struct yaffs_ext_tags *t,
+ const struct yaffs_packed_tags1 *pt);
+#endif
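
Taken together, the bit-fields above occupy two 32-bit words (20+2+10 bits and 18+12+1+1 bits), and with the trailing should_be_ff word the packed YAFFS1 tags come to 12 bytes, which is what lets them fit the 16-byte spare area of the 512-byte-page NAND parts YAFFS1 targets. A minimal standalone sketch, assuming a typical ABI where unsigned is 32 bits wide, that mirrors the layout and reports its size:

#include <stdio.h>

/* Local mirror of struct yaffs_packed_tags1, for illustration only. */
struct packed_tags1_mirror {
	unsigned chunk_id:20;
	unsigned serial_number:2;
	unsigned n_bytes:10;		/* first 32-bit word ends here  */
	unsigned obj_id:18;
	unsigned ecc:12;
	unsigned deleted:1;
	unsigned unused_stuff:1;	/* second 32-bit word ends here */
	unsigned should_be_ff;		/* third 32-bit word            */
};

int main(void)
{
	/* Expected to print 12 on a typical 32-bit-unsigned ABI. */
	printf("packed YAFFS1 tags: %u bytes\n",
	       (unsigned)sizeof(struct packed_tags1_mirror));
	return 0;
}
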
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.c
new file mode 100644
index 000000000..e1d18cc33
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.c
@@ -0,0 +1,197 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_packedtags2.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+/* This code packs a set of extended tags into a binary structure for
+ * NAND storage
+ */
+
+/* Some of the information is "extra" stuff which can be packed in to
+ * speed up scanning.
+ * This is defined by having the EXTRA_HEADER_INFO_FLAG set.
+ */
+
+/* Extra flags applied to chunk_id */
+
+#define EXTRA_HEADER_INFO_FLAG 0x80000000
+#define EXTRA_SHRINK_FLAG 0x40000000
+#define EXTRA_SHADOWS_FLAG 0x20000000
+#define EXTRA_SPARE_FLAGS 0x10000000
+
+#define ALL_EXTRA_FLAGS 0xf0000000
+
+/* Also, the top 4 bits of the object Id are set to the object type. */
+#define EXTRA_OBJECT_TYPE_SHIFT (28)
+#define EXTRA_OBJECT_TYPE_MASK ((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)
+
+static void yaffs_dump_packed_tags2_tags_only(
+ const struct yaffs_packed_tags2_tags_only *ptt)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "packed tags obj %d chunk %d byte %d seq %d",
+ ptt->obj_id, ptt->chunk_id, ptt->n_bytes, ptt->seq_number);
+}
+
+static void yaffs_dump_packed_tags2(const struct yaffs_packed_tags2 *pt)
+{
+ yaffs_dump_packed_tags2_tags_only(&pt->t);
+}
+
+static void yaffs_dump_tags2(const struct yaffs_ext_tags *t)
+{
+ yaffs_trace(YAFFS_TRACE_MTD,
+ "ext.tags eccres %d blkbad %d chused %d obj %d chunk%d byte %d del %d ser %d seq %d",
+ t->ecc_result, t->block_bad, t->chunk_used, t->obj_id,
+ t->chunk_id, t->n_bytes, t->is_deleted, t->serial_number,
+ t->seq_number);
+
+}
+
+static int yaffs_check_tags_extra_packable(const struct yaffs_ext_tags *t)
+{
+ if (t->chunk_id != 0 || !t->extra_available)
+ return 0;
+
+ /* Check if the file size is too long to store */
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE &&
+ (t->extra_file_size >> 31) != 0)
+ return 0;
+ return 1;
+}
+
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *ptt,
+ const struct yaffs_ext_tags *t)
+{
+ ptt->chunk_id = t->chunk_id;
+ ptt->seq_number = t->seq_number;
+ ptt->n_bytes = t->n_bytes;
+ ptt->obj_id = t->obj_id;
+
+ /* Only store extra tags for object headers.
+ * If it is a file then only store if the file size is short
+ * enough to fit.
+ */
+ if (yaffs_check_tags_extra_packable(t)) {
+ /* Store the extra header info instead */
+ /* We save the parent object in the chunk_id */
+ ptt->chunk_id = EXTRA_HEADER_INFO_FLAG | t->extra_parent_id;
+ if (t->extra_is_shrink)
+ ptt->chunk_id |= EXTRA_SHRINK_FLAG;
+ if (t->extra_shadows)
+ ptt->chunk_id |= EXTRA_SHADOWS_FLAG;
+
+ ptt->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+ ptt->obj_id |= (t->extra_obj_type << EXTRA_OBJECT_TYPE_SHIFT);
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ ptt->n_bytes = t->extra_equiv_id;
+ else if (t->extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ ptt->n_bytes = (unsigned) t->extra_file_size;
+ else
+ ptt->n_bytes = 0;
+ }
+
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc)
+{
+ yaffs_pack_tags2_tags_only(&pt->t, t);
+
+ if (tags_ecc)
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc);
+}
+
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *ptt)
+{
+ memset(t, 0, sizeof(struct yaffs_ext_tags));
+
+ if (ptt->seq_number == 0xffffffff)
+ return;
+
+ t->block_bad = 0;
+ t->chunk_used = 1;
+ t->obj_id = ptt->obj_id;
+ t->chunk_id = ptt->chunk_id;
+ t->n_bytes = ptt->n_bytes;
+ t->is_deleted = 0;
+ t->serial_number = 0;
+ t->seq_number = ptt->seq_number;
+
+ /* Do extra header info stuff */
+ if (ptt->chunk_id & EXTRA_HEADER_INFO_FLAG) {
+ t->chunk_id = 0;
+ t->n_bytes = 0;
+
+ t->extra_available = 1;
+ t->extra_parent_id = ptt->chunk_id & (~(ALL_EXTRA_FLAGS));
+ t->extra_is_shrink = ptt->chunk_id & EXTRA_SHRINK_FLAG ? 1 : 0;
+ t->extra_shadows = ptt->chunk_id & EXTRA_SHADOWS_FLAG ? 1 : 0;
+ t->extra_obj_type = ptt->obj_id >> EXTRA_OBJECT_TYPE_SHIFT;
+ t->obj_id &= ~EXTRA_OBJECT_TYPE_MASK;
+
+ if (t->extra_obj_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ t->extra_equiv_id = ptt->n_bytes;
+ else
+ t->extra_file_size = ptt->n_bytes;
+ }
+ yaffs_dump_packed_tags2_tags_only(ptt);
+ yaffs_dump_tags2(t);
+}
+
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc)
+{
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ if (pt->t.seq_number != 0xffffffff && tags_ecc) {
+ /* Chunk is in use and we need to do ECC */
+
+ struct yaffs_ecc_other ecc;
+ int result;
+ yaffs_ecc_calc_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &ecc);
+ result =
+ yaffs_ecc_correct_other((unsigned char *)&pt->t,
+ sizeof(struct yaffs_packed_tags2_tags_only),
+ &pt->ecc, &ecc);
+ switch (result) {
+ case 0:
+ ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ break;
+ case 1:
+ ecc_result = YAFFS_ECC_RESULT_FIXED;
+ break;
+ case -1:
+ ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ break;
+ default:
+ ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ }
+ }
+ yaffs_unpack_tags2_tags_only(t, &pt->t);
+
+ t->ecc_result = ecc_result;
+
+ yaffs_dump_packed_tags2(pt);
+ yaffs_dump_tags2(t);
+}
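
To see the "extra header info" folding above in isolation: when a header chunk qualifies, the parent object id is stored in chunk_id with the flag bits on top, and the object type takes the top four bits of obj_id. A standalone sketch using the same constants; the parent id, object id and type values below are made up for the demo:

#include <stdio.h>

/* Constants copied from yaffs_packedtags2.c above. */
#define EXTRA_HEADER_INFO_FLAG	0x80000000
#define ALL_EXTRA_FLAGS		0xf0000000
#define EXTRA_OBJECT_TYPE_SHIFT	(28)
#define EXTRA_OBJECT_TYPE_MASK	((0x0f) << EXTRA_OBJECT_TYPE_SHIFT)

int main(void)
{
	/* Hypothetical values, for the demo only. */
	unsigned parent_id = 0x1234;	/* parent object id          */
	unsigned obj_type = 3;		/* an object-type enum value */
	unsigned obj_id = 0x42;		/* object id of the header   */

	/* Pack, as yaffs_pack_tags2_tags_only() does. */
	unsigned chunk_id = EXTRA_HEADER_INFO_FLAG | parent_id;
	unsigned packed_obj = (obj_id & ~EXTRA_OBJECT_TYPE_MASK) |
			      (obj_type << EXTRA_OBJECT_TYPE_SHIFT);

	/* Unpack, as yaffs_unpack_tags2_tags_only() does. */
	unsigned parent_back = chunk_id & ~ALL_EXTRA_FLAGS;
	unsigned type_back = packed_obj >> EXTRA_OBJECT_TYPE_SHIFT;
	unsigned obj_back = packed_obj & ~EXTRA_OBJECT_TYPE_MASK;

	printf("parent %#x type %u obj %#x\n", parent_back, type_back, obj_back);
	return 0;
}
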
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.h
new file mode 100644
index 000000000..675e71946
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_packedtags2.h
@@ -0,0 +1,47 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/* This is used to pack YAFFS2 tags, not YAFFS1 tags. */
+
+#ifndef __YAFFS_PACKEDTAGS2_H__
+#define __YAFFS_PACKEDTAGS2_H__
+
+#include "yaffs_guts.h"
+#include "yaffs_ecc.h"
+
+struct yaffs_packed_tags2_tags_only {
+ unsigned seq_number;
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+struct yaffs_packed_tags2 {
+ struct yaffs_packed_tags2_tags_only t;
+ struct yaffs_ecc_other ecc;
+};
+
+/* Full packed tags with ECC, used for oob tags */
+void yaffs_pack_tags2(struct yaffs_packed_tags2 *pt,
+ const struct yaffs_ext_tags *t, int tags_ecc);
+void yaffs_unpack_tags2(struct yaffs_ext_tags *t, struct yaffs_packed_tags2 *pt,
+ int tags_ecc);
+
+/* Only the tags part (no ECC), for use with inband tags */
+void yaffs_pack_tags2_tags_only(struct yaffs_packed_tags2_tags_only *pt,
+ const struct yaffs_ext_tags *t);
+void yaffs_unpack_tags2_tags_only(struct yaffs_ext_tags *t,
+ struct yaffs_packed_tags2_tags_only *pt);
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_qsort.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_qsort.c
new file mode 100644
index 000000000..1ca589574
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_qsort.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "yportenv.h"
+/* #include <linux/string.h> */
+
+/*
+ * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function".
+ */
+#define swapcode(TYPE, parmi, parmj, n) do { \
+ long i = (n) / sizeof(TYPE); \
+ register TYPE *pi = (TYPE *) (parmi); \
+ register TYPE *pj = (TYPE *) (parmj); \
+ do { \
+ register TYPE t = *pi; \
+ *pi++ = *pj; \
+ *pj++ = t; \
+ } while (--i > 0); \
+} while (0)
+
+#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \
+ es % sizeof(long) ? 2 : es == sizeof(long) ? 0 : 1;
+
+static inline void
+swapfunc(char *a, char *b, int n, int swaptype)
+{
+ if (swaptype <= 1)
+ swapcode(long, a, b, n);
+ else
+ swapcode(char, a, b, n);
+}
+
+#define yswap(a, b) do { \
+ if (swaptype == 0) { \
+ long t = *(long *)(a); \
+ *(long *)(a) = *(long *)(b); \
+ *(long *)(b) = t; \
+ } else \
+ swapfunc(a, b, es, swaptype); \
+} while (0)
+
+#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype)
+
+static inline char *
+med3(char *a, char *b, char *c, int (*cmp)(const void *, const void *))
+{
+ return cmp(a, b) < 0 ?
+ (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a))
+ : (cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c));
+}
+
+#ifndef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+void
+yaffs_qsort(void *aa, size_t n, size_t es,
+ int (*cmp)(const void *, const void *))
+{
+ char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
+ int d, r, swaptype, swap_cnt;
+ register char *a = aa;
+
+loop: SWAPINIT(a, es);
+ swap_cnt = 0;
+ if (n < 7) {
+ for (pm = (char *)a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+ yswap(pl, pl - es);
+ return;
+ }
+ pm = (char *)a + (n / 2) * es;
+ if (n > 7) {
+ pl = (char *)a;
+ pn = (char *)a + (n - 1) * es;
+ if (n > 40) {
+ d = (n / 8) * es;
+ pl = med3(pl, pl + d, pl + 2 * d, cmp);
+ pm = med3(pm - d, pm, pm + d, cmp);
+ pn = med3(pn - 2 * d, pn - d, pn, cmp);
+ }
+ pm = med3(pl, pm, pn, cmp);
+ }
+ yswap(a, pm);
+ pa = pb = (char *)a + es;
+
+ pc = pd = (char *)a + (n - 1) * es;
+ for (;;) {
+ while (pb <= pc && (r = cmp(pb, a)) <= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+ yswap(pa, pb);
+ pa += es;
+ }
+ pb += es;
+ }
+ while (pb <= pc && (r = cmp(pc, a)) >= 0) {
+ if (r == 0) {
+ swap_cnt = 1;
+ yswap(pc, pd);
+ pd -= es;
+ }
+ pc -= es;
+ }
+ if (pb > pc)
+ break;
+ yswap(pb, pc);
+ swap_cnt = 1;
+ pb += es;
+ pc -= es;
+ }
+ if (swap_cnt == 0) { /* Switch to insertion sort */
+ for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
+ for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0;
+ pl -= es)
+ yswap(pl, pl - es);
+ return;
+ }
+
+ pn = (char *)a + n * es;
+ r = min(pa - (char *)a, pb - pa);
+ vecswap(a, pb - r, r);
+ r = min((long)(pd - pc), (long)(pn - pd - es));
+ vecswap(pb, pn - r, r);
+ r = pb - pa;
+ if (r > es)
+ yaffs_qsort(a, r / es, es, cmp);
+ r = pd - pc;
+ if (r > es) {
+ /* Iterate rather than recurse to save stack space */
+ a = pn - r;
+ n = r / es;
+ goto loop;
+ }
+/* yaffs_qsort(pn - r, r / es, es, cmp);*/
+}
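
The routine has the same shape as libc qsort(): base pointer, element count, element size and a comparator. A small usage sketch (the prototype is declared locally here rather than taken from a header):

#include <stdio.h>

/* Prototype matching the definition in yaffs_qsort.c above. */
void yaffs_qsort(void *aa, size_t n, size_t es,
		 int (*cmp)(const void *, const void *));

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return (x > y) - (x < y);
}

int main(void)
{
	int v[] = { 9, 3, 7, 1, 5 };
	unsigned i;

	yaffs_qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp_int);

	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%d ", v[i]);	/* prints: 1 3 5 7 9 */
	printf("\n");
	return 0;
}
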
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_summary.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_summary.c
new file mode 100644
index 000000000..e9e1b5d85
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_summary.c
@@ -0,0 +1,309 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Summaries write the useful part of the tags for the chunks in a block
+ * into an array which is written to the last n chunks of the block.
+ * Reading the summaries gives all the tags for the block in one read,
+ * which is much faster.
+ *
+ * Chunks holding summaries are marked with tags making it look like
+ * they are part of a fake file.
+ *
+ * The summary could also be used during gc.
+ *
+ */
+
+#include "yaffs_summary.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_bitmap.h"
+
+/*
+ * The summary is built up in an array of summary tags.
+ * This gets written to the last one or two (maybe more) chunks in a block.
+ * A summary header is written as the first part of each chunk of summary data.
+ * The summary header must match or the summary is rejected.
+ */
+
+/* Summary tags don't need the sequence number because that is redundant. */
+struct yaffs_summary_tags {
+ unsigned obj_id;
+ unsigned chunk_id;
+ unsigned n_bytes;
+};
+
+/* Summary header */
+struct yaffs_summary_header {
+ unsigned version; /* Must match current version */
+ unsigned block; /* Must be this block */
+ unsigned seq; /* Must be this sequence number */
+ unsigned sum; /* Just add up all the bytes in the tags */
+};
+
+
+static void yaffs_summary_clear(struct yaffs_dev *dev)
+{
+ if (!dev->sum_tags)
+ return;
+ memset(dev->sum_tags, 0, dev->chunks_per_summary *
+ sizeof(struct yaffs_summary_tags));
+}
+
+
+void yaffs_summary_deinit(struct yaffs_dev *dev)
+{
+ kfree(dev->sum_tags);
+ dev->sum_tags = NULL;
+ kfree(dev->gc_sum_tags);
+ dev->gc_sum_tags = NULL;
+ dev->chunks_per_summary = 0;
+}
+
+int yaffs_summary_init(struct yaffs_dev *dev)
+{
+ int sum_bytes;
+ int chunks_used; /* Number of chunks used by summary */
+ int sum_tags_bytes;
+
+ sum_bytes = dev->param.chunks_per_block *
+ sizeof(struct yaffs_summary_tags);
+
+ chunks_used = (sum_bytes + dev->data_bytes_per_chunk - 1)/
+ (dev->data_bytes_per_chunk -
+ sizeof(struct yaffs_summary_header));
+
+ dev->chunks_per_summary = dev->param.chunks_per_block - chunks_used;
+ sum_tags_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ dev->sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ dev->gc_sum_tags = kmalloc(sum_tags_bytes, GFP_NOFS);
+ if (!dev->sum_tags || !dev->gc_sum_tags) {
+ yaffs_summary_deinit(dev);
+ return YAFFS_FAIL;
+ }
+
+ yaffs_summary_clear(dev);
+
+ return YAFFS_OK;
+}
+
+static unsigned yaffs_summary_sum(struct yaffs_dev *dev)
+{
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int i;
+ unsigned sum = 0;
+
+ i = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ while (i > 0) {
+ sum += *sum_buffer;
+ sum_buffer++;
+ i--;
+ }
+
+ return sum;
+}
+
+static int yaffs_summary_write(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)dev->sum_tags;
+ int n_bytes;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) *
+ dev->chunks_per_summary;
+ memset(&tags, 0, sizeof(struct yaffs_ext_tags));
+ tags.obj_id = YAFFS_OBJECTID_SUMMARY;
+ tags.chunk_id = 1;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = dev->alloc_block * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ hdr.version = YAFFS_SUMMARY_VERSION;
+ hdr.block = blk;
+ hdr.seq = bi->seq_number;
+ hdr.sum = yaffs_summary_sum(dev);
+
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ memcpy(buffer, &hdr, sizeof(hdr));
+ memcpy(buffer + sizeof(hdr), sum_buffer, this_tx);
+ tags.n_bytes = this_tx + sizeof(hdr);
+ result = yaffs_wr_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (result != YAFFS_OK)
+ break;
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ dev->n_free_chunks--;
+
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ tags.chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+
+ if (result == YAFFS_OK)
+ bi->has_summary = 1;
+
+
+ return result;
+}
+
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk)
+{
+ struct yaffs_ext_tags tags;
+ u8 *buffer;
+ u8 *sum_buffer = (u8 *)st;
+ int n_bytes;
+ int chunk_id;
+ int chunk_in_nand;
+ int chunk_in_block;
+ int result;
+ int this_tx;
+ struct yaffs_summary_header hdr;
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int sum_bytes_per_chunk = dev->data_bytes_per_chunk - sizeof(hdr);
+
+ buffer = yaffs_get_temp_buffer(dev);
+ n_bytes = sizeof(struct yaffs_summary_tags) * dev->chunks_per_summary;
+ chunk_in_block = dev->chunks_per_summary;
+ chunk_in_nand = blk * dev->param.chunks_per_block +
+ dev->chunks_per_summary;
+ chunk_id = 1;
+ do {
+ this_tx = n_bytes;
+ if (this_tx > sum_bytes_per_chunk)
+ this_tx = sum_bytes_per_chunk;
+ result = yaffs_rd_chunk_tags_nand(dev, chunk_in_nand,
+ buffer, &tags);
+
+ if (tags.chunk_id != chunk_id ||
+ tags.obj_id != YAFFS_OBJECTID_SUMMARY ||
+ tags.chunk_used == 0 ||
+ tags.ecc_result > YAFFS_ECC_RESULT_FIXED ||
+ tags.n_bytes != (this_tx + sizeof(hdr)))
+ result = YAFFS_FAIL;
+ if (result != YAFFS_OK)
+ break;
+
+ if (st == dev->sum_tags) {
+ /* If we're scanning then update the block info */
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+ }
+ memcpy(&hdr, buffer, sizeof(hdr));
+ memcpy(sum_buffer, buffer + sizeof(hdr), this_tx);
+ n_bytes -= this_tx;
+ sum_buffer += this_tx;
+ chunk_in_nand++;
+ chunk_in_block++;
+ chunk_id++;
+ } while (result == YAFFS_OK && n_bytes > 0);
+ yaffs_release_temp_buffer(dev, buffer);
+
+ if (result == YAFFS_OK) {
+ /* Verify header */
+ if (hdr.version != YAFFS_SUMMARY_VERSION ||
+ hdr.seq != bi->seq_number ||
+ hdr.sum != yaffs_summary_sum(dev))
+ result = YAFFS_FAIL;
+ }
+
+ if (st == dev->sum_tags && result == YAFFS_OK)
+ bi->has_summary = 1;
+
+ return result;
+}
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_nand)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ int block_in_nand = chunk_in_nand / dev->param.chunks_per_block;
+ int chunk_in_block = chunk_in_nand % dev->param.chunks_per_block;
+
+ if (!dev->sum_tags)
+ return YAFFS_OK;
+
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ yaffs_pack_tags2_tags_only(&tags_only, tags);
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ sum_tags->chunk_id = tags_only.chunk_id;
+ sum_tags->n_bytes = tags_only.n_bytes;
+ sum_tags->obj_id = tags_only.obj_id;
+
+ if (chunk_in_block == dev->chunks_per_summary - 1) {
+ /* Time to write out the summary */
+ yaffs_summary_write(dev, block_in_nand);
+ yaffs_summary_clear(dev);
+ yaffs_skip_rest_of_block(dev);
+ }
+ }
+ return YAFFS_OK;
+}
+
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block)
+{
+ struct yaffs_packed_tags2_tags_only tags_only;
+ struct yaffs_summary_tags *sum_tags;
+ if (chunk_in_block >= 0 && chunk_in_block < dev->chunks_per_summary) {
+ sum_tags = &dev->sum_tags[chunk_in_block];
+ tags_only.chunk_id = sum_tags->chunk_id;
+ tags_only.n_bytes = sum_tags->n_bytes;
+ tags_only.obj_id = sum_tags->obj_id;
+ yaffs_unpack_tags2_tags_only(tags, &tags_only);
+ return YAFFS_OK;
+ }
+ return YAFFS_FAIL;
+}
+
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
+{
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
+ int i;
+
+ if (!bi->has_summary)
+ return;
+
+ for (i = dev->chunks_per_summary;
+ i < dev->param.chunks_per_block;
+ i++) {
+ if (yaffs_check_chunk_bit(dev, blk, i)) {
+ yaffs_clear_chunk_bit(dev, blk, i);
+ bi->pages_in_use--;
+ dev->n_free_chunks++;
+ }
+ }
+}
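
The arithmetic in yaffs_summary_init() above decides how many chunks at the end of each block are given over to the summary. A worked example, assuming a geometry of 64 chunks of 2048 data bytes per block (the structure sizes mirror the ones defined above):

#include <stdio.h>

/* Mirrors of the per-chunk record and the header used above. */
struct summary_tags { unsigned obj_id, chunk_id, n_bytes; };	/* 12 bytes */
struct summary_header { unsigned version, block, seq, sum; };	/* 16 bytes */

int main(void)
{
	/* Assumed NAND geometry for this example. */
	int chunks_per_block = 64;
	int data_bytes_per_chunk = 2048;

	int sum_bytes = chunks_per_block * sizeof(struct summary_tags);
	int payload = data_bytes_per_chunk - sizeof(struct summary_header);
	int chunks_used = (sum_bytes + data_bytes_per_chunk - 1) / payload;
	int chunks_per_summary = chunks_per_block - chunks_used;

	/* 64 * 12 = 768 bytes of tags fit in one 2032-byte payload, so a
	 * single chunk per block is given up and 63 chunks get summarised. */
	printf("summary chunks: %d, summarised chunks: %d\n",
	       chunks_used, chunks_per_summary);
	return 0;
}
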
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_summary.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_summary.h
new file mode 100644
index 000000000..be141d073
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_summary.h
@@ -0,0 +1,37 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_SUMMARY_H__
+#define __YAFFS_SUMMARY_H__
+
+#include "yaffs_packedtags2.h"
+
+
+int yaffs_summary_init(struct yaffs_dev *dev);
+void yaffs_summary_deinit(struct yaffs_dev *dev);
+
+int yaffs_summary_add(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_fetch(struct yaffs_dev *dev,
+ struct yaffs_ext_tags *tags,
+ int chunk_in_block);
+int yaffs_summary_read(struct yaffs_dev *dev,
+ struct yaffs_summary_tags *st,
+ int blk);
+void yaffs_summary_gc(struct yaffs_dev *dev, int blk);
+
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.c
new file mode 100644
index 000000000..9ac5896da
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.c
@@ -0,0 +1,407 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_tagscompat.h"
+#include "yaffs_ecc.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_trace.h"
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
+
+
+/********** Tags ECC calculations *********/
+
+void yaffs_calc_ecc(const u8 *data, struct yaffs_spare *spare)
+{
+ yaffs_ecc_calc(data, spare->ecc1);
+ yaffs_ecc_calc(&data[256], spare->ecc2);
+}
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
+{
+ /* Calculate an ecc */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+ unsigned i, j;
+ unsigned ecc = 0;
+ unsigned bit = 0;
+
+ tags->ecc = 0;
+
+ for (i = 0; i < 8; i++) {
+ for (j = 1; j & 0xff; j <<= 1) {
+ bit++;
+ if (b[i] & j)
+ ecc ^= bit;
+ }
+ }
+ tags->ecc = ecc;
+}
+
+int yaffs_check_tags_ecc(struct yaffs_tags *tags)
+{
+ unsigned ecc = tags->ecc;
+
+ yaffs_calc_tags_ecc(tags);
+
+ ecc ^= tags->ecc;
+
+ if (ecc && ecc <= 64) {
+ /* TODO: Handle the failure better. Retire? */
+ unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
+
+ ecc--;
+
+ b[ecc / 8] ^= (1 << (ecc & 7));
+
+ /* Now recalc the ecc */
+ yaffs_calc_tags_ecc(tags);
+
+ return 1; /* recovered error */
+ } else if (ecc) {
+ /* Weird ecc failure value */
+ /* TODO: Need to do something here */
+ return -1; /* unrecovered error */
+ }
+ return 0;
+}
+
+/********** Tags **********/
+
+static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+
+ yaffs_calc_tags_ecc(tags_ptr);
+
+ spare_ptr->tb0 = tu->as_bytes[0];
+ spare_ptr->tb1 = tu->as_bytes[1];
+ spare_ptr->tb2 = tu->as_bytes[2];
+ spare_ptr->tb3 = tu->as_bytes[3];
+ spare_ptr->tb4 = tu->as_bytes[4];
+ spare_ptr->tb5 = tu->as_bytes[5];
+ spare_ptr->tb6 = tu->as_bytes[6];
+ spare_ptr->tb7 = tu->as_bytes[7];
+}
+
+static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
+ struct yaffs_spare *spare_ptr,
+ struct yaffs_tags *tags_ptr)
+{
+ union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
+ int result;
+
+ tu->as_bytes[0] = spare_ptr->tb0;
+ tu->as_bytes[1] = spare_ptr->tb1;
+ tu->as_bytes[2] = spare_ptr->tb2;
+ tu->as_bytes[3] = spare_ptr->tb3;
+ tu->as_bytes[4] = spare_ptr->tb4;
+ tu->as_bytes[5] = spare_ptr->tb5;
+ tu->as_bytes[6] = spare_ptr->tb6;
+ tu->as_bytes[7] = spare_ptr->tb7;
+
+ result = yaffs_check_tags_ecc(tags_ptr);
+ if (result > 0)
+ dev->n_tags_ecc_fixed++;
+ else if (result < 0)
+ dev->n_tags_ecc_unfixed++;
+}
+
+static void yaffs_spare_init(struct yaffs_spare *spare)
+{
+ memset(spare, 0xff, sizeof(struct yaffs_spare));
+}
+
+static int yaffs_wr_nand(struct yaffs_dev *dev,
+ int nand_chunk, const u8 *data,
+ struct yaffs_spare *spare)
+{
+ if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>> yaffs chunk %d is not valid",
+ nand_chunk);
+ return YAFFS_FAIL;
+ }
+
+ return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
+}
+
+static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data,
+ struct yaffs_spare *spare,
+ enum yaffs_ecc_result *ecc_result,
+ int correct_errors)
+{
+ int ret_val;
+ struct yaffs_spare local_spare;
+
+ if (!spare) {
+ /* If we don't have a real spare, then we use a local one. */
+ /* Need this for the calculation of the ecc */
+ spare = &local_spare;
+ }
+
+ if (!dev->param.use_nand_ecc) {
+ ret_val =
+ dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
+ if (data && correct_errors) {
+ /* Do ECC correction */
+ /* Todo handle any errors */
+ int ecc_result1, ecc_result2;
+ u8 calc_ecc[3];
+
+ yaffs_ecc_calc(data, calc_ecc);
+ ecc_result1 =
+ yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
+ yaffs_ecc_calc(&data[256], calc_ecc);
+ ecc_result2 =
+ yaffs_ecc_correct(&data[256], spare->ecc2,
+ calc_ecc);
+
+ if (ecc_result1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_fixed++;
+ } else if (ecc_result2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>yaffs ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ dev->n_ecc_unfixed++;
+ }
+
+ if (ecc_result1 || ecc_result2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (ecc_result1 < 0 || ecc_result2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (ecc_result1 > 0 || ecc_result2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+ }
+ } else {
+ /* Must allocate enough memory for spare+2*sizeof(int) */
+ /* for ecc results from device. */
+ struct yaffs_nand_spare nspare;
+
+ memset(&nspare, 0, sizeof(nspare));
+
+ ret_val = dev->param.read_chunk_fn(dev, nand_chunk, data,
+ (struct yaffs_spare *)
+ &nspare);
+ memcpy(spare, &nspare, sizeof(struct yaffs_spare));
+ if (data && correct_errors) {
+ if (nspare.eccres1 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:0",
+ nand_chunk);
+ } else if (nspare.eccres1 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:0",
+ nand_chunk);
+ }
+
+ if (nspare.eccres2 > 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error fix performed on chunk %d:1",
+ nand_chunk);
+ } else if (nspare.eccres2 < 0) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "**>>mtd ecc error unfixed on chunk %d:1",
+ nand_chunk);
+ }
+
+ if (nspare.eccres1 || nspare.eccres2) {
+ /* We had a data problem on this page */
+ yaffs_handle_rd_data_error(dev, nand_chunk);
+ }
+
+ if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
+ *ecc_result = YAFFS_ECC_RESULT_UNFIXED;
+ else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
+ *ecc_result = YAFFS_ECC_RESULT_FIXED;
+ else
+ *ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
+
+ }
+ }
+ return ret_val;
+}
+
+/*
+ * Functions for improving robustness
+ */
+
+static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
+{
+ int flash_block = nand_chunk / dev->param.chunks_per_block;
+
+ /* Mark the block for retirement */
+ yaffs_get_block_info(dev, flash_block + dev->block_offset)->
+ needs_retiring = 1;
+ yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ "**>>Block %d marked for retirement",
+ flash_block);
+
+ /* TODO:
+ * Just do a garbage collection on the affected block
+ * then retire the block
+ * NB recursion
+ */
+}
+
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+
+ yaffs_spare_init(&spare);
+
+ if (ext_tags->is_deleted)
+ spare.page_status = 0;
+ else {
+ tags.obj_id = ext_tags->obj_id;
+ tags.chunk_id = ext_tags->chunk_id;
+
+ tags.n_bytes_lsb = ext_tags->n_bytes & (1024 - 1);
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
+ else
+ tags.n_bytes_msb = 3;
+
+ tags.serial_number = ext_tags->serial_number;
+
+ if (!dev->param.use_nand_ecc && data)
+ yaffs_calc_ecc(data, &spare);
+
+ yaffs_load_tags_to_spare(&spare, &tags);
+ }
+ return yaffs_wr_nand(dev, nand_chunk, data, &spare);
+}
+
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *ext_tags)
+{
+ struct yaffs_spare spare;
+ struct yaffs_tags tags;
+ enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ int deleted;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ if (!yaffs_rd_chunk_nand(dev, nand_chunk,
+ data, &spare, &ecc_result, 1))
+ return YAFFS_FAIL;
+
+ /* ext_tags may be NULL */
+ if (!ext_tags)
+ return YAFFS_OK;
+
+ deleted = (hweight8(spare.page_status) < 7) ? 1 : 0;
+
+ ext_tags->is_deleted = deleted;
+ ext_tags->ecc_result = ecc_result;
+ ext_tags->block_bad = 0; /* We're reading it */
+ /* therefore it is not a bad block */
+ ext_tags->chunk_used =
+ memcmp(&spare_ff, &spare, sizeof(spare_ff)) ? 1 : 0;
+
+ if (ext_tags->chunk_used) {
+ yaffs_get_tags_from_spare(dev, &spare, &tags);
+ ext_tags->obj_id = tags.obj_id;
+ ext_tags->chunk_id = tags.chunk_id;
+ ext_tags->n_bytes = tags.n_bytes_lsb;
+
+ if (dev->data_bytes_per_chunk >= 1024)
+ ext_tags->n_bytes |=
+ (((unsigned)tags.n_bytes_msb) << 10);
+
+ ext_tags->serial_number = tags.serial_number;
+ }
+
+ return YAFFS_OK;
+}
+
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
+{
+ struct yaffs_spare spare;
+
+ memset(&spare, 0xff, sizeof(struct yaffs_spare));
+
+ spare.block_status = 'Y';
+
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
+ &spare);
+ yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
+ NULL, &spare);
+
+ return YAFFS_OK;
+}
+
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number)
+{
+ struct yaffs_spare spare0, spare1;
+ static struct yaffs_spare spare_ff;
+ static int init;
+ enum yaffs_ecc_result dummy;
+
+ if (!init) {
+ memset(&spare_ff, 0xff, sizeof(spare_ff));
+ init = 1;
+ }
+
+ *seq_number = 0;
+
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
+ &spare0, &dummy, 1);
+ yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
+ NULL, &spare1, &dummy, 1);
+
+ if (hweight8(spare0.block_status & spare1.block_status) < 7)
+ *state = YAFFS_BLOCK_STATE_DEAD;
+ else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
+ *state = YAFFS_BLOCK_STATE_EMPTY;
+ else
+ *state = YAFFS_BLOCK_STATE_NEEDS_SCAN;
+
+ return YAFFS_OK;
+}
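
The tags ECC used by yaffs_calc_tags_ecc() and yaffs_check_tags_ecc() is the XOR of the 1-based positions of every set bit in the eight tag bytes; a single flipped bit makes the stored and recomputed codes differ by exactly that position, which is enough to flip it back. A standalone sketch of the idea with made-up tag bytes:

#include <stdio.h>

typedef unsigned char u8;

/* Same scheme as yaffs_calc_tags_ecc(): XOR of the 1-based positions of
 * all set bits across an 8-byte buffer. */
static unsigned tags_ecc(const u8 *b)
{
	unsigned ecc = 0, bit = 0;
	int i, j;

	for (i = 0; i < 8; i++)
		for (j = 1; j & 0xff; j <<= 1) {
			bit++;
			if (b[i] & j)
				ecc ^= bit;
		}
	return ecc;
}

int main(void)
{
	u8 buf[8] = { 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0 };
	unsigned stored = tags_ecc(buf);	/* code written with the tags */
	unsigned delta;

	buf[3] ^= 0x10;				/* simulate a one-bit flip    */

	delta = stored ^ tags_ecc(buf);
	if (delta && delta <= 64) {		/* as yaffs_check_tags_ecc()  */
		delta--;			/* back to a 0-based index    */
		buf[delta / 8] ^= 1 << (delta & 7);
		printf("corrected bit %u, byte 3 is 0x%02x again\n",
		       delta, buf[3]);		/* 0x78                       */
	}
	return 0;
}
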
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.h
new file mode 100644
index 000000000..b3c665577
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_tagscompat.h
@@ -0,0 +1,36 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_TAGSCOMPAT_H__
+#define __YAFFS_TAGSCOMPAT_H__
+
+#include "yaffs_guts.h"
+int yaffs_tags_compat_wr(struct yaffs_dev *dev,
+ int nand_chunk,
+ const u8 *data, const struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_rd(struct yaffs_dev *dev,
+ int nand_chunk,
+ u8 *data, struct yaffs_ext_tags *tags);
+int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int block_no);
+int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
+ int block_no,
+ enum yaffs_block_state *state,
+ u32 *seq_number);
+
+void yaffs_calc_tags_ecc(struct yaffs_tags *tags);
+int yaffs_check_tags_ecc(struct yaffs_tags *tags);
+int yaffs_count_bits(u8 byte);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_trace.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_trace.h
new file mode 100644
index 000000000..fd26054d3
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_trace.h
@@ -0,0 +1,57 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YTRACE_H__
+#define __YTRACE_H__
+
+extern unsigned int yaffs_trace_mask;
+extern unsigned int yaffs_wr_attempts;
+
+/*
+ * Tracing flags.
+ * The flags masked in YAFFS_TRACE_ALWAYS are always traced.
+ */
+
+#define YAFFS_TRACE_OS 0x00000002
+#define YAFFS_TRACE_ALLOCATE 0x00000004
+#define YAFFS_TRACE_SCAN 0x00000008
+#define YAFFS_TRACE_BAD_BLOCKS 0x00000010
+#define YAFFS_TRACE_ERASE 0x00000020
+#define YAFFS_TRACE_GC 0x00000040
+#define YAFFS_TRACE_WRITE 0x00000080
+#define YAFFS_TRACE_TRACING 0x00000100
+#define YAFFS_TRACE_DELETION 0x00000200
+#define YAFFS_TRACE_BUFFERS 0x00000400
+#define YAFFS_TRACE_NANDACCESS 0x00000800
+#define YAFFS_TRACE_GC_DETAIL 0x00001000
+#define YAFFS_TRACE_SCAN_DEBUG 0x00002000
+#define YAFFS_TRACE_MTD 0x00004000
+#define YAFFS_TRACE_CHECKPOINT 0x00008000
+
+#define YAFFS_TRACE_VERIFY 0x00010000
+#define YAFFS_TRACE_VERIFY_NAND 0x00020000
+#define YAFFS_TRACE_VERIFY_FULL 0x00040000
+#define YAFFS_TRACE_VERIFY_ALL 0x000f0000
+
+#define YAFFS_TRACE_SYNC 0x00100000
+#define YAFFS_TRACE_BACKGROUND 0x00200000
+#define YAFFS_TRACE_LOCK 0x00400000
+#define YAFFS_TRACE_MOUNT 0x00800000
+
+#define YAFFS_TRACE_ERROR 0x40000000
+#define YAFFS_TRACE_BUG 0x80000000
+#define YAFFS_TRACE_ALWAYS 0xf0000000
+
+#endif
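
Because the mask is a plain bitmap, enabling several trace categories is just an OR of the flags above; yaffs_trace_mask is the variable the yaffs_trace() calls throughout this patch are filtered by, and cmd_yaffs_tracemask() in yaffs_uboot_glue.c further down is the U-Boot-side setter. A small example combination (value 0x40000010):

#include "yaffs_trace.h"

/* Example only: trace errors and bad-block handling, nothing else. */
unsigned int yaffs_trace_mask = YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS;

/* Equivalent, via the U-Boot glue:
 *   cmd_yaffs_tracemask(1, YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS);
 */
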
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_uboot_glue.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_uboot_glue.c
new file mode 100644
index 000000000..50000a135
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_uboot_glue.c
@@ -0,0 +1,465 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2007 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * yaffscfg.c The configuration for the "direct" use of yaffs.
+ *
+ * This is set up for u-boot.
+ *
+ * This version now uses the ydevconfig mechanism to set up partitions.
+ */
+
+#include <common.h>
+#include <div64.h>
+
+#include <config.h>
+#include "nand.h"
+#include "yaffscfg.h"
+#include "yaffsfs.h"
+#include "yaffs_packedtags2.h"
+#include "yaffs_mtdif.h"
+#include "yaffs_mtdif2.h"
+#if 0
+#include <errno.h>
+#else
+#include "malloc.h"
+#endif
+
+unsigned yaffs_trace_mask = 0x0; /* Disable logging */
+static int yaffs_errno;
+
+
+void yaffs_bug_fn(const char *fn, int n)
+{
+ printf("yaffs bug at %s:%d\n", fn, n);
+}
+
+void *yaffsfs_malloc(size_t x)
+{
+ return malloc(x);
+}
+
+void yaffsfs_free(void *x)
+{
+ free(x);
+}
+
+void yaffsfs_SetError(int err)
+{
+ yaffs_errno = err;
+}
+
+int yaffsfs_GetLastError(void)
+{
+ return yaffs_errno;
+}
+
+
+int yaffsfs_GetError(void)
+{
+ return yaffs_errno;
+}
+
+void yaffsfs_Lock(void)
+{
+}
+
+void yaffsfs_Unlock(void)
+{
+}
+
+__u32 yaffsfs_CurrentTime(void)
+{
+ return 0;
+}
+
+void *yaffs_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+void yaffs_free(void *ptr)
+{
+ free(ptr);
+}
+
+void yaffsfs_LocalInitialisation(void)
+{
+ /* No locking used */
+}
+
+
+static const char *yaffs_file_type_str(struct yaffs_stat *stat)
+{
+ switch (stat->st_mode & S_IFMT) {
+ case S_IFREG: return "regular file";
+ case S_IFDIR: return "directory";
+ case S_IFLNK: return "symlink";
+ default: return "unknown";
+ }
+}
+
+static const char *yaffs_error_str(void)
+{
+ int error = yaffsfs_GetLastError();
+
+ if (error < 0)
+ error = -error;
+
+ switch (error) {
+ case EBUSY: return "Busy";
+ case ENODEV: return "No such device";
+ case EINVAL: return "Invalid parameter";
+ case ENFILE: return "Too many open files";
+ case EBADF: return "Bad handle";
+ case EACCES: return "Wrong permissions";
+ case EXDEV: return "Not on same device";
+ case ENOENT: return "No such entry";
+ case ENOSPC: return "Device full";
+ case EROFS: return "Read only file system";
+ case ERANGE: return "Range error";
+ case ENOTEMPTY: return "Not empty";
+ case ENAMETOOLONG: return "Name too long";
+ case ENOMEM: return "Out of memory";
+ case EFAULT: return "Fault";
+ case EEXIST: return "Name exists";
+ case ENOTDIR: return "Not a directory";
+ case EISDIR: return "Not permitted on a directory";
+ case ELOOP: return "Symlink loop";
+ case 0: return "No error";
+ default: return "Unknown error";
+ }
+}
+
+extern nand_info_t nand_info[];
+
+void cmd_yaffs_tracemask(unsigned set, unsigned mask)
+{
+ if (set)
+ yaffs_trace_mask = mask;
+
+ printf("yaffs trace mask: %08x\n", yaffs_trace_mask);
+}
+
+static int yaffs_regions_overlap(int a, int b, int x, int y)
+{
+ return (a <= x && x <= b) ||
+ (a <= y && y <= b) ||
+ (x <= a && a <= y) ||
+ (x <= b && b <= y);
+}
+
+void cmd_yaffs_devconfig(char *_mp, int flash_dev,
+ int start_block, int end_block)
+{
+ struct mtd_info *mtd = NULL;
+ struct yaffs_dev *dev = NULL;
+ struct yaffs_dev *chk;
+ char *mp = NULL;
+ struct nand_chip *chip;
+
+ dev = calloc(1, sizeof(*dev));
+ mp = strdup(_mp);
+
+ mtd = &nand_info[flash_dev];
+
+ if (!dev || !mp) {
+ /* Alloc error */
+ printf("Failed to allocate memory\n");
+ goto err;
+ }
+
+ if (flash_dev >= CONFIG_SYS_MAX_NAND_DEVICE) {
+ printf("Flash device invalid\n");
+ goto err;
+ }
+
+ if (end_block == 0)
+ end_block = lldiv(mtd->size, mtd->erasesize) - 1;
+
+ if (end_block < start_block) {
+ printf("Bad start/end\n");
+ goto err;
+ }
+
+ chip = mtd->priv;
+
+ /* Check for any conflicts */
+ yaffs_dev_rewind();
+ while (1) {
+ chk = yaffs_next_dev();
+ if (!chk)
+ break;
+ if (strcmp(chk->param.name, mp) == 0) {
+ printf("Mount point name already used\n");
+ goto err;
+ }
+ if (chk->driver_context == mtd &&
+ yaffs_regions_overlap(
+ chk->param.start_block, chk->param.end_block,
+ start_block, end_block)) {
+ printf("Region overlaps with partition %s\n",
+ chk->param.name);
+ goto err;
+ }
+
+ }
+
+ /* Seems sane, so configure */
+ memset(dev, 0, sizeof(*dev));
+ dev->param.name = mp;
+ dev->driver_context = mtd;
+ dev->param.start_block = start_block;
+ dev->param.end_block = end_block;
+ dev->param.chunks_per_block = mtd->erasesize / mtd->writesize;
+ dev->param.total_bytes_per_chunk = mtd->writesize;
+ dev->param.is_yaffs2 = 1;
+ dev->param.use_nand_ecc = 1;
+ dev->param.n_reserved_blocks = 5;
+ if (chip->ecc.layout->oobavail < sizeof(struct yaffs_packed_tags2))
+ dev->param.inband_tags = 1;
+ dev->param.n_caches = 10;
+ dev->param.write_chunk_tags_fn = nandmtd2_write_chunk_tags;
+ dev->param.read_chunk_tags_fn = nandmtd2_read_chunk_tags;
+ dev->param.erase_fn = nandmtd_EraseBlockInNAND;
+ dev->param.initialise_flash_fn = nandmtd_InitialiseNAND;
+ dev->param.bad_block_fn = nandmtd2_MarkNANDBlockBad;
+ dev->param.query_block_fn = nandmtd2_QueryNANDBlock;
+
+ yaffs_add_device(dev);
+
+ printf("Configures yaffs mount %s: dev %d start block %d, end block %d %s\n",
+ mp, flash_dev, start_block, end_block,
+ dev->param.inband_tags ? "using inband tags" : "");
+ return;
+
+err:
+ free(dev);
+ free(mp);
+}
+
+void cmd_yaffs_dev_ls(void)
+{
+ struct yaffs_dev *dev;
+ int flash_dev;
+ int free_space;
+
+ yaffs_dev_rewind();
+
+ while (1) {
+ dev = yaffs_next_dev();
+ if (!dev)
+ return;
+ flash_dev =
+ ((unsigned) dev->driver_context - (unsigned) nand_info)/
+ sizeof(nand_info[0]);
+ printf("%-10s %5d 0x%05x 0x%05x %s",
+ dev->param.name, flash_dev,
+ dev->param.start_block, dev->param.end_block,
+ dev->param.inband_tags ? "using inband tags, " : "");
+
+ free_space = yaffs_freespace(dev->param.name);
+ if (free_space < 0)
+ printf("not mounted\n");
+ else
+ printf("free 0x%x\n", free_space);
+
+ }
+}
+
+void make_a_file(char *yaffsName, char bval, int sizeOfFile)
+{
+ int outh;
+ int i;
+ unsigned char buffer[100];
+
+ outh = yaffs_open(yaffsName,
+ O_CREAT | O_RDWR | O_TRUNC,
+ S_IREAD | S_IWRITE);
+ if (outh < 0) {
+ printf("Error opening file: %d. %s\n", outh, yaffs_error_str());
+ return;
+ }
+
+ memset(buffer, bval, 100);
+
+ do {
+ i = sizeOfFile;
+ if (i > 100)
+ i = 100;
+ sizeOfFile -= i;
+
+ yaffs_write(outh, buffer, i);
+
+ } while (sizeOfFile > 0);
+
+
+ yaffs_close(outh);
+}
+
+void read_a_file(char *fn)
+{
+ int h;
+ int i = 0;
+ unsigned char b;
+
+ h = yaffs_open(fn, O_RDWR, 0);
+ if (h < 0) {
+ printf("File not found\n");
+ return;
+ }
+
+ while (yaffs_read(h, &b, 1) > 0) {
+ printf("%02x ", b);
+ i++;
+ if (i > 32) {
+ printf("\n");
+ i = 0;
+ }
+ }
+ printf("\n");
+ yaffs_close(h);
+}
+
+void cmd_yaffs_mount(char *mp)
+{
+ int retval = yaffs_mount(mp);
+ if (retval < 0)
+ printf("Error mounting %s, return value: %d, %s\n", mp,
+ yaffsfs_GetError(), yaffs_error_str());
+}
+
+
+void cmd_yaffs_umount(char *mp)
+{
+ if (yaffs_unmount(mp) == -1)
+ printf("Error umounting %s, return value: %d, %s\n", mp,
+ yaffsfs_GetError(), yaffs_error_str());
+}
+
+void cmd_yaffs_write_file(char *yaffsName, char bval, int sizeOfFile)
+{
+ make_a_file(yaffsName, bval, sizeOfFile);
+}
+
+
+void cmd_yaffs_read_file(char *fn)
+{
+ read_a_file(fn);
+}
+
+
+void cmd_yaffs_mread_file(char *fn, char *addr)
+{
+ int h;
+ struct yaffs_stat s;
+
+ yaffs_stat(fn, &s);
+
+ printf("Copy %s to 0x%p... ", fn, addr);
+ h = yaffs_open(fn, O_RDWR, 0);
+ if (h < 0) {
+ printf("File not found\n");
+ return;
+ }
+
+ yaffs_read(h, addr, (int)s.st_size);
+ printf("\t[DONE]\n");
+
+ yaffs_close(h);
+}
+
+
+void cmd_yaffs_mwrite_file(char *fn, char *addr, int size)
+{
+ int outh;
+
+ outh = yaffs_open(fn, O_CREAT | O_RDWR | O_TRUNC, S_IREAD | S_IWRITE);
+ if (outh < 0)
+ printf("Error opening file: %d, %s\n", outh, yaffs_error_str());
+
+ yaffs_write(outh, addr, size);
+
+ yaffs_close(outh);
+}
+
+
+void cmd_yaffs_ls(const char *mountpt, int longlist)
+{
+ int i;
+ yaffs_DIR *d;
+ struct yaffs_dirent *de;
+ struct yaffs_stat stat;
+ char tempstr[255];
+
+ d = yaffs_opendir(mountpt);
+
+ if (!d) {
+ printf("opendir failed, %s\n", yaffs_error_str());
+ return;
+ }
+
+ for (i = 0; (de = yaffs_readdir(d)) != NULL; i++) {
+ if (longlist) {
+ sprintf(tempstr, "%s/%s", mountpt, de->d_name);
+ yaffs_lstat(tempstr, &stat);
+ printf("%-25s\t%7ld",
+ de->d_name,
+ (long)stat.st_size);
+ printf(" %5d %s\n",
+ stat.st_ino,
+ yaffs_file_type_str(&stat));
+ } else {
+ printf("%s\n", de->d_name);
+ }
+ }
+
+ yaffs_closedir(d);
+}
+
+
+void cmd_yaffs_mkdir(const char *dir)
+{
+ int retval = yaffs_mkdir(dir, 0);
+
+ if (retval < 0)
+ printf("yaffs_mkdir returning error: %d, %s\n",
+ retval, yaffs_error_str());
+}
+
+void cmd_yaffs_rmdir(const char *dir)
+{
+ int retval = yaffs_rmdir(dir);
+
+ if (retval < 0)
+ printf("yaffs_rmdir returning error: %d, %s\n",
+ retval, yaffs_error_str());
+}
+
+void cmd_yaffs_rm(const char *path)
+{
+ int retval = yaffs_unlink(path);
+
+ if (retval < 0)
+ printf("yaffs_unlink returning error: %d, %s\n",
+ retval, yaffs_error_str());
+}
+
+void cmd_yaffs_mv(const char *oldPath, const char *newPath)
+{
+ int retval = yaffs_rename(oldPath, newPath);
+
+ if (retval < 0)
+ printf("yaffs_rename returning error: %d, %s\n",
+ retval, yaffs_error_str());
+}
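
cmd_yaffs_devconfig() above rejects a new partition whose block range shares anything with an existing partition on the same MTD device, using yaffs_regions_overlap() on inclusive [start, end] ranges. A quick standalone check of that predicate with made-up ranges:

#include <stdio.h>

/* Copy of the predicate from yaffs_uboot_glue.c above: [a,b] and [x,y]
 * are inclusive block ranges. */
static int yaffs_regions_overlap(int a, int b, int x, int y)
{
	return (a <= x && x <= b) ||
	       (a <= y && y <= b) ||
	       (x <= a && a <= y) ||
	       (x <= b && b <= y);
}

int main(void)
{
	/* Hypothetical partitions: blocks 0-99 and 100-199 do not overlap,
	 * blocks 0-99 and 50-149 do. */
	printf("0-99 vs 100-199: %d\n", yaffs_regions_overlap(0, 99, 100, 199));
	printf("0-99 vs  50-149: %d\n", yaffs_regions_overlap(0, 99, 50, 149));
	return 0;
}
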
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_verify.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_verify.c
new file mode 100644
index 000000000..97734a9e2
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_verify.c
@@ -0,0 +1,526 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_verify.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+
+int yaffs_skip_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask &
+ (YAFFS_TRACE_VERIFY | YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_full_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_FULL));
+}
+
+static int yaffs_skip_nand_verification(struct yaffs_dev *dev)
+{
+ dev = dev;
+ return !(yaffs_trace_mask & (YAFFS_TRACE_VERIFY_NAND));
+}
+
+static const char * const block_state_name[] = {
+ "Unknown",
+ "Needs scan",
+ "Scanning",
+ "Empty",
+ "Allocating",
+ "Full",
+ "Dirty",
+ "Checkpoint",
+ "Collecting",
+ "Dead"
+};
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi, int n)
+{
+ int actually_used;
+ int in_use;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Report illegal runtime states */
+ if (bi->block_state >= YAFFS_NUMBER_OF_BLOCK_STATES)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has undefined state %d",
+ n, bi->block_state);
+
+ switch (bi->block_state) {
+ case YAFFS_BLOCK_STATE_UNKNOWN:
+ case YAFFS_BLOCK_STATE_SCANNING:
+ case YAFFS_BLOCK_STATE_NEEDS_SCAN:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has bad run-state %s",
+ n, block_state_name[bi->block_state]);
+ }
+
+ /* Check pages in use and soft deletions are legal */
+
+ actually_used = bi->pages_in_use - bi->soft_del_pages;
+
+ if (bi->pages_in_use < 0 ||
+ bi->pages_in_use > dev->param.chunks_per_block ||
+ bi->soft_del_pages < 0 ||
+ bi->soft_del_pages > dev->param.chunks_per_block ||
+ actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has illegal values pages_in_used %d soft_del_pages %d",
+ n, bi->pages_in_use, bi->soft_del_pages);
+
+ /* Check chunk bitmap legal */
+ in_use = yaffs_count_chunk_bits(dev, n);
+ if (in_use != bi->pages_in_use)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Block %d has inconsistent values pages_in_use %d counted chunk bits %d",
+ n, bi->pages_in_use, in_use);
+}
+
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n)
+{
+ yaffs_verify_blk(dev, bi, n);
+
+ /* After collection the block should be in the erased state */
+
+ if (bi->block_state != YAFFS_BLOCK_STATE_COLLECTING &&
+ bi->block_state != YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Block %d is in state %d after gc, should be erased",
+ n, bi->block_state);
+ }
+}
+
+void yaffs_verify_blocks(struct yaffs_dev *dev)
+{
+ int i;
+ int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
+ int illegal_states = 0;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ memset(state_count, 0, sizeof(state_count));
+
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ struct yaffs_block_info *bi = yaffs_get_block_info(dev, i);
+ yaffs_verify_blk(dev, bi, i);
+
+ if (bi->block_state < YAFFS_NUMBER_OF_BLOCK_STATES)
+ state_count[bi->block_state]++;
+ else
+ illegal_states++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_VERIFY, "Block summary");
+
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%d blocks have illegal states",
+ illegal_states);
+ if (state_count[YAFFS_BLOCK_STATE_ALLOCATING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many allocating blocks");
+
+ for (i = 0; i < YAFFS_NUMBER_OF_BLOCK_STATES; i++)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "%s %d blocks",
+ block_state_name[i], state_count[i]);
+
+ if (dev->blocks_in_checkpt != state_count[YAFFS_BLOCK_STATE_CHECKPOINT])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Checkpoint block count wrong dev %d count %d",
+ dev->blocks_in_checkpt,
+ state_count[YAFFS_BLOCK_STATE_CHECKPOINT]);
+
+ if (dev->n_erased_blocks != state_count[YAFFS_BLOCK_STATE_EMPTY])
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Erased block count wrong dev %d count %d",
+ dev->n_erased_blocks,
+ state_count[YAFFS_BLOCK_STATE_EMPTY]);
+
+ if (state_count[YAFFS_BLOCK_STATE_COLLECTING] > 1)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Too many collecting blocks %d (max is 1)",
+ state_count[YAFFS_BLOCK_STATE_COLLECTING]);
+}
+
+/*
+ * Verify the object header. oh must be valid, but obj and tags may be NULL in
+ * which case those tests will not be performed.
+ */
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!(tags && obj && oh)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Verifying object header tags %p obj %p oh %p",
+ tags, obj, oh);
+ return;
+ }
+
+ if (oh->type <= YAFFS_OBJECT_TYPE_UNKNOWN ||
+ oh->type > YAFFS_OBJECT_TYPE_MAX)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header type is illegal value 0x%x",
+ tags->obj_id, oh->type);
+
+ if (tags->obj_id != obj->obj_id)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch obj_id %d",
+ tags->obj_id, obj->obj_id);
+
+ /*
+ * Check that the object's parent ids match if parent_check requested.
+ *
+ * Tests do not apply to the root object.
+ */
+
+ if (parent_check && tags->obj_id > 1 && !obj->parent)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d obj->parent is NULL",
+ tags->obj_id, oh->parent_obj_id);
+
+ if (parent_check && obj->parent &&
+ oh->parent_obj_id != obj->parent->obj_id &&
+ (oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED ||
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header mismatch parent_id %d parent_obj_id %d",
+ tags->obj_id, oh->parent_obj_id,
+ obj->parent->obj_id);
+
+ if (tags->obj_id > 1 && oh->name[0] == 0) /* Null name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is NULL",
+ obj->obj_id);
+
+ if (tags->obj_id > 1 && ((u8) (oh->name[0])) == 0xff) /* Junk name */
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d header name is 0xff",
+ obj->obj_id);
+}
+
+void yaffs_verify_file(struct yaffs_obj *obj)
+{
+ u32 x;
+ int required_depth;
+ int last_chunk;
+ u32 offset_in_chunk;
+ u32 the_chunk;
+
+ u32 i;
+ struct yaffs_dev *dev;
+ struct yaffs_ext_tags tags;
+ struct yaffs_tnode *tn;
+ u32 obj_id;
+
+ if (!obj)
+ return;
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ dev = obj->my_dev;
+ obj_id = obj->obj_id;
+
+
+ /* Check file size is consistent with tnode depth */
+ yaffs_addr_to_chunk(dev, obj->variant.file_variant.file_size,
+ &last_chunk, &offset_in_chunk);
+ last_chunk++;
+ x = last_chunk >> YAFFS_TNODES_LEVEL0_BITS;
+ required_depth = 0;
+ while (x > 0) {
+ x >>= YAFFS_TNODES_INTERNAL_BITS;
+ required_depth++;
+ }
+
+ /* Check that the chunks in the tnode tree are all correct.
+ * We do this by scanning through the tnode tree and
+ * checking the tags for every chunk match.
+ */
+
+ if (yaffs_skip_nand_verification(dev))
+ return;
+
+ for (i = 1; i <= last_chunk; i++) {
+ tn = yaffs_find_tnode_0(dev, &obj->variant.file_variant, i);
+
+ if (!tn)
+ continue;
+
+ the_chunk = yaffs_get_group_base(dev, tn, i);
+ if (the_chunk > 0) {
+ yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL,
+ &tags);
+ if (tags.obj_id != obj_id || tags.chunk_id != i)
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Object %d chunk_id %d NAND mismatch chunk %d tags (%d:%d)",
+ obj_id, i, the_chunk,
+ tags.obj_id, tags.chunk_id);
+ }
+ }
+}
+
+void yaffs_verify_link(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify sane equivalent object */
+}
+
+void yaffs_verify_symlink(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+
+ /* Verify symlink string */
+}
+
+void yaffs_verify_special(struct yaffs_obj *obj)
+{
+ if (obj && yaffs_skip_verification(obj->my_dev))
+ return;
+}
+
+void yaffs_verify_obj(struct yaffs_obj *obj)
+{
+ struct yaffs_dev *dev;
+ u32 chunk_min;
+ u32 chunk_max;
+ u32 chunk_id_ok;
+ u32 chunk_in_range;
+ u32 chunk_wrongly_deleted;
+ u32 chunk_valid;
+
+ if (!obj)
+ return;
+
+ if (obj->being_created)
+ return;
+
+ dev = obj->my_dev;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Check sane object header chunk */
+
+ chunk_min = dev->internal_start_block * dev->param.chunks_per_block;
+ chunk_max =
+ (dev->internal_end_block + 1) * dev->param.chunks_per_block - 1;
+
+ chunk_in_range = (((unsigned)(obj->hdr_chunk)) >= chunk_min &&
+ ((unsigned)(obj->hdr_chunk)) <= chunk_max);
+ chunk_id_ok = chunk_in_range || (obj->hdr_chunk == 0);
+ chunk_valid = chunk_in_range &&
+ yaffs_check_chunk_bit(dev,
+ obj->hdr_chunk / dev->param.chunks_per_block,
+ obj->hdr_chunk % dev->param.chunks_per_block);
+ chunk_wrongly_deleted = chunk_in_range && !chunk_valid;
+
+ if (!obj->fake && (!chunk_id_ok || chunk_wrongly_deleted))
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has chunk_id %d %s %s",
+ obj->obj_id, obj->hdr_chunk,
+ chunk_id_ok ? "" : ",out of range",
+ chunk_wrongly_deleted ? ",marked as deleted" : "");
+
+ if (chunk_valid && !yaffs_skip_nand_verification(dev)) {
+ struct yaffs_ext_tags tags;
+ struct yaffs_obj_hdr *oh;
+ u8 *buffer = yaffs_get_temp_buffer(dev);
+
+ oh = (struct yaffs_obj_hdr *)buffer;
+
+ yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, buffer, &tags);
+
+ yaffs_verify_oh(obj, oh, &tags, 1);
+
+ yaffs_release_temp_buffer(dev, buffer);
+ }
+
+ /* Verify it has a parent */
+ if (obj && !obj->fake && (!obj->parent || obj->parent->my_dev != dev)) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has parent pointer %p which does not look like an object",
+ obj->obj_id, obj->parent);
+ }
+
+ /* Verify parent is a directory */
+ if (obj->parent &&
+ obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d's parent is not a directory (type %d)",
+ obj->obj_id, obj->parent->variant_type);
+ }
+
+ switch (obj->variant_type) {
+ case YAFFS_OBJECT_TYPE_FILE:
+ yaffs_verify_file(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ yaffs_verify_symlink(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ yaffs_verify_dir(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ yaffs_verify_link(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ yaffs_verify_special(obj);
+ break;
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ default:
+ yaffs_trace(YAFFS_TRACE_VERIFY,
+ "Obj %d has illegaltype %d",
+ obj->obj_id, obj->variant_type);
+ break;
+ }
+}
+
+void yaffs_verify_objects(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ int i;
+ struct list_head *lh;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ /* Iterate through the objects in each hash entry */
+
+ for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ yaffs_verify_obj(obj);
+ }
+ }
+}
+
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+ int count = 0;
+
+ if (!obj) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "No object to verify");
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_verification(obj->my_dev))
+ return;
+
+ if (!obj->parent) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Object does not have parent");
+ BUG();
+ return;
+ }
+
+ if (obj->parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS, "Parent is not directory");
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &obj->parent->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ yaffs_verify_obj(list_obj);
+ if (obj == list_obj)
+ count++;
+ }
+
+ if (count != 1) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory %d times",
+ count);
+ BUG();
+ }
+}
+
+void yaffs_verify_dir(struct yaffs_obj *directory)
+{
+ struct list_head *lh;
+ struct yaffs_obj *list_obj;
+
+ if (!directory) {
+ BUG();
+ return;
+ }
+
+ if (yaffs_skip_full_verification(directory->my_dev))
+ return;
+
+ if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Directory has wrong type: %d",
+ directory->variant_type);
+ BUG();
+ }
+
+ /* Iterate through the objects in each hash entry */
+
+ list_for_each(lh, &directory->variant.dir_variant.children) {
+ list_obj = list_entry(lh, struct yaffs_obj, siblings);
+ if (list_obj->parent != directory) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Object in directory list has wrong parent %p",
+ list_obj->parent);
+ BUG();
+ }
+ yaffs_verify_obj_in_dir(list_obj);
+ }
+}
+
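+/* Count of mismatches between dev->n_free_chunks and the counted free
+ * chunks, as detected by yaffs_verify_free_chunks() below. */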
+static int yaffs_free_verification_failures;
+
+void yaffs_verify_free_chunks(struct yaffs_dev *dev)
+{
+ int counted;
+ int difference;
+
+ if (yaffs_skip_verification(dev))
+ return;
+
+ counted = yaffs_count_free_chunks(dev);
+
+ difference = dev->n_free_chunks - counted;
+
+ if (difference) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Freechunks verification failure %d %d %d",
+ dev->n_free_chunks, counted, difference);
+ yaffs_free_verification_failures++;
+ }
+}
+
+int yaffs_verify_file_sane(struct yaffs_obj *in)
+{
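+ /* Stub: no detailed file sanity checking is done here; the
+ * self-assignment below only silences an unused-parameter warning. */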
+ in = in;
+ return YAFFS_OK;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_verify.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_verify.h
new file mode 100644
index 000000000..4f4af8d29
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_verify.h
@@ -0,0 +1,43 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_VERIFY_H__
+#define __YAFFS_VERIFY_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_verify_blk(struct yaffs_dev *dev, struct yaffs_block_info *bi,
+ int n);
+void yaffs_verify_collected_blk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi, int n);
+void yaffs_verify_blocks(struct yaffs_dev *dev);
+
+void yaffs_verify_oh(struct yaffs_obj *obj, struct yaffs_obj_hdr *oh,
+ struct yaffs_ext_tags *tags, int parent_check);
+void yaffs_verify_file(struct yaffs_obj *obj);
+void yaffs_verify_link(struct yaffs_obj *obj);
+void yaffs_verify_symlink(struct yaffs_obj *obj);
+void yaffs_verify_special(struct yaffs_obj *obj);
+void yaffs_verify_obj(struct yaffs_obj *obj);
+void yaffs_verify_objects(struct yaffs_dev *dev);
+void yaffs_verify_obj_in_dir(struct yaffs_obj *obj);
+void yaffs_verify_dir(struct yaffs_obj *directory);
+void yaffs_verify_free_chunks(struct yaffs_dev *dev);
+
+int yaffs_verify_file_sane(struct yaffs_obj *obj);
+
+int yaffs_skip_verification(struct yaffs_dev *dev);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.c
new file mode 100644
index 000000000..357d8f75d
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.c
@@ -0,0 +1,419 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_yaffs1.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_nand.h"
+#include "yaffs_attribs.h"
+
+int yaffs1_scan(struct yaffs_dev *dev)
+{
+ struct yaffs_ext_tags tags;
+ int blk;
+ int chunk;
+ int c;
+ int deleted;
+ enum yaffs_block_state state;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int alloc_failed = 0;
+ struct yaffs_shadow_fixer *shadow_fixers = NULL;
+ u8 *chunk_data;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs1_scan starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, state, seq_number);
+
+ if (state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ }
+ bi++;
+ }
+
+ /* For each block.... */
+ for (blk = dev->internal_start_block;
+ !alloc_failed && blk <= dev->internal_end_block; blk++) {
+
+ cond_resched();
+
+ bi = yaffs_get_block_info(dev, blk);
+ state = bi->block_state;
+
+ deleted = 0;
+
+ /* For each chunk in each block that needs scanning.... */
+ for (c = 0;
+ !alloc_failed && c < dev->param.chunks_per_block &&
+ state == YAFFS_BLOCK_STATE_NEEDS_SCAN; c++) {
+ /* Read the tags and decide what to do */
+ chunk = blk * dev->param.chunks_per_block + c;
+
+ yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+
+ /* Let's have a good look at this chunk... */
+
+ if (tags.ecc_result == YAFFS_ECC_RESULT_UNFIXED ||
+ tags.is_deleted) {
+ /* YAFFS1 only...
+ * A deleted chunk
+ */
+ deleted++;
+ dev->n_free_chunks++;
+ } else if (!tags.chunk_used) {
+ /* An unassigned chunk in the block
+ * This means that either the block is empty or
+ * this is the one being allocated from
+ */
+
+ if (c == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ /* this is the block being allocated */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, c);
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = c;
+ dev->alloc_block_finder = blk;
+
+ }
+
+ dev->n_free_chunks +=
+ (dev->param.chunks_per_block - c);
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ unsigned int endpos;
+
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ /* PutChunkIntoFile checks for a clash
+ * (two data chunks with the same chunk_id).
+ */
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in) {
+ if (!yaffs_put_chunk_in_file
+ (in, tags.chunk_id, chunk, 1))
+ alloc_failed = 1;
+ }
+
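+ /* Track the highest data offset seen so far; unless
+ * object headers carry the file size, this also
+ * becomes the file size. */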
+ endpos =
+ (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk +
+ tags.n_bytes;
+ if (in &&
+ in->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE &&
+ in->variant.file_variant.scanned_size <
+ endpos) {
+ in->variant.file_variant.scanned_size =
+ endpos;
+ if (!dev->param.use_header_file_size) {
+ in->variant.
+ file_variant.file_size =
+ in->variant.
+ file_variant.scanned_size;
+ }
+
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Make the object
+ */
+ yaffs_set_chunk_bit(dev, blk, c);
+ bi->pages_in_use++;
+
+ yaffs_rd_chunk_tags_nand(dev, chunk,
+ chunk_data, NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ in = yaffs_find_by_number(dev, tags.obj_id);
+ if (in && in->variant_type != oh->type) {
+ /* This should not happen, but somehow
+ * We've ended up with an obj_id that
+ * has been reused but not yet deleted,
+ * and worse still it has changed type.
+ * Delete the old object.
+ */
+
+ yaffs_del_obj(in);
+ in = NULL;
+ }
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ oh->type);
+
+ if (!in)
+ alloc_failed = 1;
+
+ if (in && oh->shadows_obj > 0) {
+
+ struct yaffs_shadow_fixer *fixer;
+ fixer =
+ kmalloc(sizeof
+ (struct yaffs_shadow_fixer),
+ GFP_NOFS);
+ if (fixer) {
+ fixer->next = shadow_fixers;
+ shadow_fixers = fixer;
+ fixer->obj_id = tags.obj_id;
+ fixer->shadowed_id =
+ oh->shadows_obj;
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Shadow fixer: %d shadows %d",
+ fixer->obj_id,
+ fixer->shadowed_id);
+
+ }
+
+ }
+
+ if (in && in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate and need to
+ * resolve it. */
+
+ unsigned existing_serial = in->serial;
+ unsigned new_serial =
+ tags.serial_number;
+
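+ /* Serial numbers are only 2 bits wide, so the
+ * comparison is done modulo 4 to decide which
+ * copy of the header is newer. */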
+ if (((existing_serial + 1) & 3) ==
+ new_serial) {
+ /* Use new one - destroy the
+ * existing one */
+ yaffs_chunk_del(dev,
+ in->hdr_chunk,
+ 1, __LINE__);
+ in->valid = 0;
+ } else {
+ /* Use existing - destroy
+ * this one. */
+ yaffs_chunk_del(dev, chunk, 1,
+ __LINE__);
+ }
+ }
+
+ if (in && !in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id ==
+ YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ } else if (in && !in->valid) {
+ /* we need to load this info */
+
+ in->valid = 1;
+ in->variant_type = oh->type;
+
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->hdr_chunk = chunk;
+ in->serial = tags.serial_number;
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ in->dirty = 0;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ parent =
+ yaffs_find_or_create_by_number
+ (dev, oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ if (!parent)
+ alloc_failed = 1;
+ if (parent && parent->variant_type ==
+ YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.
+ children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, a problem....
+ * We're trying to use a
+ * non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+
+ yaffs_add_obj_to_dir(parent, in);
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ if (dev->param.
+ use_header_file_size)
+ in->variant.
+ file_variant.file_size
+ = yaffs_oh_to_size(oh);
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ in->variant.
+ hardlink_variant.equiv_id =
+ oh->equiv_id;
+ list_add(&in->hard_links,
+ &hard_list);
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ in->variant.symlink_variant.
+ alias =
+ yaffs_clone_str(oh->alias);
+ if (!in->variant.
+ symlink_variant.alias)
+ alloc_failed = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning,
+ * then the block is fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ if (state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ /* If the block was partially allocated then
+ * treat it as fully allocated. */
+ state = YAFFS_BLOCK_STATE_FULL;
+ dev->alloc_block = -1;
+ }
+
+ bi->block_state = state;
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL)
+ yaffs_block_became_dirty(dev, blk);
+ }
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We should now have scanned all the objects, so now it's time to add
+ * these hardlinks.
+ */
+
+ yaffs_link_fixup(dev, &hard_list);
+
+ /*
+ * Fix up any shadowed objects.
+ * There should not be more than one of these.
+ */
+ {
+ struct yaffs_shadow_fixer *fixer;
+ struct yaffs_obj *obj;
+
+ while (shadow_fixers) {
+ fixer = shadow_fixers;
+ shadow_fixers = fixer->next;
+ /* Complete the rename transaction by deleting the
+ * shadowed object then setting the object header
+ * to unshadowed.
+ */
+ obj = yaffs_find_by_number(dev, fixer->shadowed_id);
+ if (obj)
+ yaffs_del_obj(obj);
+
+ obj = yaffs_find_by_number(dev, fixer->obj_id);
+
+ if (obj)
+ yaffs_update_oh(obj, NULL, 1, 0, 0, NULL);
+
+ kfree(fixer);
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs1_scan ends");
+
+ return YAFFS_OK;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.h
new file mode 100644
index 000000000..97e2fdd08
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs1.h
@@ -0,0 +1,22 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS1_H__
+#define __YAFFS_YAFFS1_H__
+
+#include "yaffs_guts.h"
+int yaffs1_scan(struct yaffs_dev *dev);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.c b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.c
new file mode 100644
index 000000000..f76dcaeeb
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.c
@@ -0,0 +1,1526 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "yaffs_guts.h"
+#include "yaffs_trace.h"
+#include "yaffs_yaffs2.h"
+#include "yaffs_checkptrw.h"
+#include "yaffs_bitmap.h"
+#include "yaffs_nand.h"
+#include "yaffs_getblockinfo.h"
+#include "yaffs_verify.h"
+#include "yaffs_attribs.h"
+#include "yaffs_summary.h"
+
+/*
+ * Checkpoints are really no benefit on very small partitions.
+ *
+ * To save space on small partitions don't bother with checkpoints unless
+ * the partition is at least this big.
+ */
+#define YAFFS_CHECKPOINT_MIN_BLOCKS 60
+#define YAFFS_SMALL_HOLE_THRESHOLD 4
+
+/*
+ * Oldest Dirty Sequence Number handling.
+ */
+
+/* yaffs_calc_oldest_dirty_seq()
+ * yaffs2_find_oldest_dirty_seq()
+ * Calculate the oldest dirty sequence number if we don't know it.
+ */
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ int i;
+ unsigned seq;
+ unsigned block_no = 0;
+ struct yaffs_block_info *b;
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ /* Find the oldest dirty sequence number. */
+ seq = dev->seq_number + 1;
+ b = dev->block_info;
+ for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
+ if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
+ (b->pages_in_use - b->soft_del_pages) <
+ dev->param.chunks_per_block &&
+ b->seq_number < seq) {
+ seq = b->seq_number;
+ block_no = i;
+ }
+ b++;
+ }
+
+ if (block_no) {
+ dev->oldest_dirty_seq = seq;
+ dev->oldest_dirty_block = block_no;
+ }
+}
+
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!dev->oldest_dirty_seq)
+ yaffs_calc_oldest_dirty_seq(dev);
+}
+
+/*
+ * yaffs_clear_oldest_dirty_seq()
+ * Called when a block is erased or marked bad (i.e. when its seq_number
+ * becomes invalid). If the value matches the oldest then we clear
+ * dev->oldest_dirty_seq to force its recomputation.
+ */
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (!bi || bi->seq_number == dev->oldest_dirty_seq) {
+ dev->oldest_dirty_seq = 0;
+ dev->oldest_dirty_block = 0;
+ }
+}
+
+/*
+ * yaffs2_update_oldest_dirty_seq()
+ * Update the oldest dirty sequence number whenever we dirty a block.
+ * Only do this if the oldest_dirty_seq is actually being tracked.
+ */
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi)
+{
+ if (!dev->param.is_yaffs2)
+ return;
+
+ if (dev->oldest_dirty_seq) {
+ if (dev->oldest_dirty_seq > bi->seq_number) {
+ dev->oldest_dirty_seq = bi->seq_number;
+ dev->oldest_dirty_block = block_no;
+ }
+ }
+}
+
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi)
+{
+
+ if (!dev->param.is_yaffs2)
+ return 1; /* disqualification only applies to yaffs2. */
+
+ if (!bi->has_shrink_hdr)
+ return 1; /* can gc */
+
+ yaffs2_find_oldest_dirty_seq(dev);
+
+ /* Can't do gc of this block if there are any blocks older than this
+ * one that have discarded pages.
+ */
+ return (bi->seq_number <= dev->oldest_dirty_seq);
+}
+
+/*
+ * yaffs2_find_refresh_block()
+ * periodically finds the oldest full block by sequence number for refreshing.
+ * Only for yaffs2.
+ */
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev)
+{
+ u32 b;
+ u32 oldest = 0;
+ u32 oldest_seq = 0;
+ struct yaffs_block_info *bi;
+
+ if (!dev->param.is_yaffs2)
+ return oldest;
+
+ /*
+ * If refresh period < 10 then refreshing is disabled.
+ */
+ if (dev->param.refresh_period < 10)
+ return oldest;
+
+ /*
+ * Fix broken values.
+ */
+ if (dev->refresh_skip > dev->param.refresh_period)
+ dev->refresh_skip = dev->param.refresh_period;
+
+ if (dev->refresh_skip > 0)
+ return oldest;
+
+ /*
+ * Refresh skip is now zero.
+ * We'll do a refresh this time around....
+ * Update the refresh skip and find the oldest block.
+ */
+ dev->refresh_skip = dev->param.refresh_period;
+ dev->refresh_count++;
+ bi = dev->block_info;
+ for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) {
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+
+ if (oldest < 1 || bi->seq_number < oldest_seq) {
+ oldest = b;
+ oldest_seq = bi->seq_number;
+ }
+ }
+ bi++;
+ }
+
+ if (oldest > 0) {
+ yaffs_trace(YAFFS_TRACE_GC,
+ "GC refresh count %d selected block %d with seq_number %d",
+ dev->refresh_count, oldest, oldest_seq);
+ }
+
+ return oldest;
+}
+
+int yaffs2_checkpt_required(struct yaffs_dev *dev)
+{
+ int nblocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ nblocks = dev->internal_end_block - dev->internal_start_block + 1;
+
+ return !dev->param.skip_checkpt_wr &&
+ !dev->read_only && (nblocks >= YAFFS_CHECKPOINT_MIN_BLOCKS);
+}
+
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev)
+{
+ int retval;
+ int n_bytes = 0;
+ int n_blocks;
+ int dev_blocks;
+
+ if (!dev->param.is_yaffs2)
+ return 0;
+
+ if (!dev->checkpoint_blocks_required && yaffs2_checkpt_required(dev)) {
+ /* Not a valid value so recalculate */
+ dev_blocks = dev->param.end_block - dev->param.start_block + 1;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(struct yaffs_checkpt_dev);
+ n_bytes += dev_blocks * sizeof(struct yaffs_block_info);
+ n_bytes += dev_blocks * dev->chunk_bit_stride;
+ n_bytes +=
+ (sizeof(struct yaffs_checkpt_obj) + sizeof(u32)) *
+ dev->n_obj;
+ n_bytes += (dev->tnode_size + sizeof(u32)) * dev->n_tnodes;
+ n_bytes += sizeof(struct yaffs_checkpt_validity);
+ n_bytes += sizeof(u32); /* checksum */
+
+ /* Round up and add 2 blocks to allow for some bad blocks,
+ * so add 3 */
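+ /* Illustrative sizing (assumed geometry): roughly 300 KiB of
+ * checkpoint data with 2 KiB data chunks and 64 chunks per
+ * block gives 307200 / 131072 = 2, plus 3 = 5 blocks. */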
+
+ n_blocks =
+ (n_bytes /
+ (dev->data_bytes_per_chunk *
+ dev->param.chunks_per_block)) + 3;
+
+ dev->checkpoint_blocks_required = n_blocks;
+ }
+
+ retval = dev->checkpoint_blocks_required - dev->blocks_in_checkpt;
+ if (retval < 0)
+ retval = 0;
+ return retval;
+}
+
+/*--------------------- Checkpointing --------------------*/
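+/*
+ * On-flash checkpoint stream layout, in the order written by
+ * yaffs2_wr_checkpt_data():
+ * validity marker (head), device info, block info array, chunk bits,
+ * one record per object (files also carry their tnode tree),
+ * validity marker (tail), checksum.
+ */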
+
+static int yaffs2_wr_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.struct_type = sizeof(cp);
+ cp.magic = YAFFS_MAGIC;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
+ cp.head = (head) ? 1 : 0;
+
+ return (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp)) ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_validity_marker(struct yaffs_dev *dev, int head)
+{
+ struct yaffs_checkpt_validity cp;
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ if (ok)
+ ok = (cp.struct_type == sizeof(cp)) &&
+ (cp.magic == YAFFS_MAGIC) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
+ (cp.head == ((head) ? 1 : 0));
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_dev_to_checkpt_dev(struct yaffs_checkpt_dev *cp,
+ struct yaffs_dev *dev)
+{
+ cp->n_erased_blocks = dev->n_erased_blocks;
+ cp->alloc_block = dev->alloc_block;
+ cp->alloc_page = dev->alloc_page;
+ cp->n_free_chunks = dev->n_free_chunks;
+
+ cp->n_deleted_files = dev->n_deleted_files;
+ cp->n_unlinked_files = dev->n_unlinked_files;
+ cp->n_bg_deletions = dev->n_bg_deletions;
+ cp->seq_number = dev->seq_number;
+
+}
+
+static void yaffs_checkpt_dev_to_dev(struct yaffs_dev *dev,
+ struct yaffs_checkpt_dev *cp)
+{
+ dev->n_erased_blocks = cp->n_erased_blocks;
+ dev->alloc_block = cp->alloc_block;
+ dev->alloc_page = cp->alloc_page;
+ dev->n_free_chunks = cp->n_free_chunks;
+
+ dev->n_deleted_files = cp->n_deleted_files;
+ dev->n_unlinked_files = cp->n_unlinked_files;
+ dev->n_bg_deletions = cp->n_bg_deletions;
+ dev->seq_number = cp->seq_number;
+}
+
+static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ int ok;
+
+ /* Write device runtime values */
+ yaffs2_dev_to_checkpt_dev(&cp, dev);
+ cp.struct_type = sizeof(cp);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ /* Write block info */
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+ ok = (yaffs2_checkpt_wr(dev, dev->block_info, n_bytes) == n_bytes);
+ if (!ok)
+ return 0;
+
+ /* Write chunk bits */
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+ ok = (yaffs2_checkpt_wr(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
+{
+ struct yaffs_checkpt_dev cp;
+ u32 n_bytes;
+ u32 n_blocks =
+ (dev->internal_end_block - dev->internal_start_block + 1);
+ int ok;
+
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (!ok)
+ return 0;
+
+ if (cp.struct_type != sizeof(cp))
+ return 0;
+
+ yaffs_checkpt_dev_to_dev(dev, &cp);
+
+ n_bytes = n_blocks * sizeof(struct yaffs_block_info);
+
+ ok = (yaffs2_checkpt_rd(dev, dev->block_info, n_bytes) == n_bytes);
+
+ if (!ok)
+ return 0;
+
+ n_bytes = n_blocks * dev->chunk_bit_stride;
+
+ ok = (yaffs2_checkpt_rd(dev, dev->chunk_bits, n_bytes) == n_bytes);
+
+ return ok ? 1 : 0;
+}
+
+static void yaffs2_obj_checkpt_obj(struct yaffs_checkpt_obj *cp,
+ struct yaffs_obj *obj)
+{
+ cp->obj_id = obj->obj_id;
+ cp->parent_id = (obj->parent) ? obj->parent->obj_id : 0;
+ cp->hdr_chunk = obj->hdr_chunk;
+ cp->variant_type = obj->variant_type;
+ cp->deleted = obj->deleted;
+ cp->soft_del = obj->soft_del;
+ cp->unlinked = obj->unlinked;
+ cp->fake = obj->fake;
+ cp->rename_allowed = obj->rename_allowed;
+ cp->unlink_allowed = obj->unlink_allowed;
+ cp->serial = obj->serial;
+ cp->n_data_chunks = obj->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ cp->size_or_equiv_obj = obj->variant.file_variant.file_size;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ cp->size_or_equiv_obj = obj->variant.hardlink_variant.equiv_id;
+}
+
+static int yaffs2_checkpt_obj_to_obj(struct yaffs_obj *obj,
+ struct yaffs_checkpt_obj *cp)
+{
+ struct yaffs_obj *parent;
+
+ if (obj->variant_type != cp->variant_type) {
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "Checkpoint read object %d type %d chunk %d does not match existing object type %d",
+ cp->obj_id, cp->variant_type, cp->hdr_chunk,
+ obj->variant_type);
+ return 0;
+ }
+
+ obj->obj_id = cp->obj_id;
+
+ if (cp->parent_id)
+ parent = yaffs_find_or_create_by_number(obj->my_dev,
+ cp->parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ else
+ parent = NULL;
+
+ if (parent) {
+ if (parent->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) {
+ yaffs_trace(YAFFS_TRACE_ALWAYS,
+ "Checkpoint read object %d parent %d type %d chunk %d Parent type, %d, not directory",
+ cp->obj_id, cp->parent_id,
+ cp->variant_type, cp->hdr_chunk,
+ parent->variant_type);
+ return 0;
+ }
+ yaffs_add_obj_to_dir(parent, obj);
+ }
+
+ obj->hdr_chunk = cp->hdr_chunk;
+ obj->variant_type = cp->variant_type;
+ obj->deleted = cp->deleted;
+ obj->soft_del = cp->soft_del;
+ obj->unlinked = cp->unlinked;
+ obj->fake = cp->fake;
+ obj->rename_allowed = cp->rename_allowed;
+ obj->unlink_allowed = cp->unlink_allowed;
+ obj->serial = cp->serial;
+ obj->n_data_chunks = cp->n_data_chunks;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ obj->variant.file_variant.file_size = cp->size_or_equiv_obj;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK)
+ obj->variant.hardlink_variant.equiv_id = cp->size_or_equiv_obj;
+
+ if (obj->hdr_chunk > 0)
+ obj->lazy_loaded = 1;
+ return 1;
+}
+
+static int yaffs2_checkpt_tnode_worker(struct yaffs_obj *in,
+ struct yaffs_tnode *tn, u32 level,
+ int chunk_offset)
+{
+ int i;
+ struct yaffs_dev *dev = in->my_dev;
+ int ok = 1;
+ u32 base_offset;
+
+ if (!tn)
+ return 1;
+
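+ /* Internal tnodes: recurse into each populated slot, accumulating the
+ * chunk offset. Level 0 tnodes are written out below, each preceded
+ * by its base chunk offset. */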
+ if (level > 0) {
+ for (i = 0; i < YAFFS_NTNODES_INTERNAL && ok; i++) {
+ if (!tn->internal[i])
+ continue;
+ ok = yaffs2_checkpt_tnode_worker(in,
+ tn->internal[i],
+ level - 1,
+ (chunk_offset <<
+ YAFFS_TNODES_INTERNAL_BITS) + i);
+ }
+ return ok;
+ }
+
+ /* Level 0 tnode */
+ base_offset = chunk_offset << YAFFS_TNODES_LEVEL0_BITS;
+ ok = (yaffs2_checkpt_wr(dev, &base_offset, sizeof(base_offset)) ==
+ sizeof(base_offset));
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+
+ return ok;
+}
+
+static int yaffs2_wr_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 end_marker = ~0;
+ int ok = 1;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return ok;
+
+ ok = yaffs2_checkpt_tnode_worker(obj,
+ obj->variant.file_variant.top,
+ obj->variant.file_variant.
+ top_level, 0);
+ if (ok)
+ ok = (yaffs2_checkpt_wr(obj->my_dev, &end_marker,
+ sizeof(end_marker)) == sizeof(end_marker));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_tnodes(struct yaffs_obj *obj)
+{
+ u32 base_chunk;
+ int ok = 1;
+ struct yaffs_dev *dev = obj->my_dev;
+ struct yaffs_file_var *file_struct_ptr = &obj->variant.file_variant;
+ struct yaffs_tnode *tn;
+ int nread = 0;
+
+ ok = (yaffs2_checkpt_rd(dev, &base_chunk, sizeof(base_chunk)) ==
+ sizeof(base_chunk));
+
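+ /* Tnode records are terminated by an all-ones base offset, the end
+ * marker written by yaffs2_wr_checkpt_tnodes(). */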
+ while (ok && (~base_chunk)) {
+ nread++;
+ /* Read level 0 tnode */
+
+ tn = yaffs_get_tnode(dev);
+ if (tn)
+ ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
+ dev->tnode_size);
+ else
+ ok = 0;
+
+ if (tn && ok)
+ ok = yaffs_add_find_tnode_0(dev,
+ file_struct_ptr,
+ base_chunk, tn) ? 1 : 0;
+
+ if (ok)
+ ok = (yaffs2_checkpt_rd
+ (dev, &base_chunk,
+ sizeof(base_chunk)) == sizeof(base_chunk));
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read tnodes %d records, last %d. ok %d",
+ nread, base_chunk, ok);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int i;
+ int ok = 1;
+ struct list_head *lh;
+
+ /* Iterate through the objects in each hash entry,
+ * dumping them to the checkpointing stream.
+ */
+
+ for (i = 0; ok && i < YAFFS_NOBJECT_BUCKETS; i++) {
+ list_for_each(lh, &dev->obj_bucket[i].list) {
+ obj = list_entry(lh, struct yaffs_obj, hash_link);
+ if (!obj->defered_free) {
+ yaffs2_obj_checkpt_obj(&cp, obj);
+ cp.struct_type = sizeof(cp);
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint write object %d parent %d type %d chunk %d obj addr %p",
+ cp.obj_id, cp.parent_id,
+ cp.variant_type, cp.hdr_chunk, obj);
+
+ ok = (yaffs2_checkpt_wr(dev, &cp,
+ sizeof(cp)) == sizeof(cp));
+
+ if (ok &&
+ obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE)
+ ok = yaffs2_wr_checkpt_tnodes(obj);
+ }
+ }
+ }
+
+ /* Dump end of list */
+ memset(&cp, 0xff, sizeof(struct yaffs_checkpt_obj));
+ cp.struct_type = sizeof(cp);
+
+ if (ok)
+ ok = (yaffs2_checkpt_wr(dev, &cp, sizeof(cp)) == sizeof(cp));
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_rd_checkpt_objs(struct yaffs_dev *dev)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_checkpt_obj cp;
+ int ok = 1;
+ int done = 0;
+ LIST_HEAD(hard_list);
+
+
+ while (ok && !done) {
+ ok = (yaffs2_checkpt_rd(dev, &cp, sizeof(cp)) == sizeof(cp));
+ if (cp.struct_type != sizeof(cp)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "struct size %d instead of %d ok %d",
+ cp.struct_type, (int)sizeof(cp), ok);
+ ok = 0;
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "Checkpoint read object %d parent %d type %d chunk %d ",
+ cp.obj_id, cp.parent_id, cp.variant_type,
+ cp.hdr_chunk);
+
+ if (ok && cp.obj_id == ~0) {
+ done = 1;
+ } else if (ok) {
+ obj =
+ yaffs_find_or_create_by_number(dev, cp.obj_id,
+ cp.variant_type);
+ if (obj) {
+ ok = yaffs2_checkpt_obj_to_obj(obj, &cp);
+ if (!ok)
+ break;
+ if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_FILE) {
+ ok = yaffs2_rd_checkpt_tnodes(obj);
+ } else if (obj->variant_type ==
+ YAFFS_OBJECT_TYPE_HARDLINK) {
+ list_add(&obj->hard_links, &hard_list);
+ }
+ } else {
+ ok = 0;
+ }
+ }
+ }
+
+ if (ok)
+ yaffs_link_fixup(dev, &hard_list);
+
+ return ok ? 1 : 0;
+}
+
+static int yaffs2_wr_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum);
+
+ ok = (yaffs2_checkpt_wr(dev, &checkpt_sum, sizeof(checkpt_sum)) ==
+ sizeof(checkpt_sum));
+
+ if (!ok)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_rd_checkpt_sum(struct yaffs_dev *dev)
+{
+ u32 checkpt_sum0;
+ u32 checkpt_sum1;
+ int ok;
+
+ yaffs2_get_checkpt_sum(dev, &checkpt_sum0);
+
+ ok = (yaffs2_checkpt_rd(dev, &checkpt_sum1, sizeof(checkpt_sum1)) ==
+ sizeof(checkpt_sum1));
+
+ if (!ok)
+ return 0;
+
+ if (checkpt_sum0 != checkpt_sum1)
+ return 0;
+
+ return 1;
+}
+
+static int yaffs2_wr_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!yaffs2_checkpt_required(dev)) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint write");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 1);
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint device");
+ ok = yaffs2_wr_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint objects");
+ ok = yaffs2_wr_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "write checkpoint validity");
+ ok = yaffs2_wr_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok)
+ ok = yaffs2_wr_checkpt_sum(dev);
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return dev->is_checkpointed;
+}
+
+static int yaffs2_rd_checkpt_data(struct yaffs_dev *dev)
+{
+ int ok = 1;
+
+ if (!dev->param.is_yaffs2)
+ ok = 0;
+
+ if (ok && dev->param.skip_checkpt_rd) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "skipping checkpoint read");
+ ok = 0;
+ }
+
+ if (ok)
+ ok = yaffs2_checkpt_open(dev, 0); /* open for read */
+
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 1);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint device");
+ ok = yaffs2_rd_checkpt_dev(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint objects");
+ ok = yaffs2_rd_checkpt_objs(dev);
+ }
+ if (ok) {
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint validity");
+ ok = yaffs2_rd_checkpt_validity_marker(dev, 0);
+ }
+
+ if (ok) {
+ ok = yaffs2_rd_checkpt_sum(dev);
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "read checkpoint checksum %d", ok);
+ }
+
+ if (!yaffs_checkpt_close(dev))
+ ok = 0;
+
+ if (ok)
+ dev->is_checkpointed = 1;
+ else
+ dev->is_checkpointed = 0;
+
+ return ok ? 1 : 0;
+}
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev)
+{
+ if (dev->is_checkpointed || dev->blocks_in_checkpt > 0) {
+ dev->is_checkpointed = 0;
+ yaffs2_checkpt_invalidate_stream(dev);
+ }
+ if (dev->param.sb_dirty_fn)
+ dev->param.sb_dirty_fn(dev);
+}
+
+int yaffs_checkpoint_save(struct yaffs_dev *dev)
+{
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "save entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+
+ if (!dev->is_checkpointed) {
+ yaffs2_checkpt_invalidate(dev);
+ yaffs2_wr_checkpt_data(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT | YAFFS_TRACE_MOUNT,
+ "save exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return dev->is_checkpointed;
+}
+
+int yaffs2_checkpt_restore(struct yaffs_dev *dev)
+{
+ int retval;
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore entry: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ retval = yaffs2_rd_checkpt_data(dev);
+
+ if (dev->is_checkpointed) {
+ yaffs_verify_objects(dev);
+ yaffs_verify_blocks(dev);
+ yaffs_verify_free_chunks(dev);
+ }
+
+ yaffs_trace(YAFFS_TRACE_CHECKPOINT,
+ "restore exit: is_checkpointed %d",
+ dev->is_checkpointed);
+
+ return retval;
+}
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size)
+{
+ /* If new_size > old_file_size then we are going to be writing a hole.
+ * If the hole is small then write zeros, otherwise write a
+ * start-of-hole marker.
+ */
+ loff_t old_file_size;
+ loff_t increase;
+ int small_hole;
+ int result = YAFFS_OK;
+ struct yaffs_dev *dev = NULL;
+ u8 *local_buffer = NULL;
+ int small_increase_ok = 0;
+
+ if (!obj)
+ return YAFFS_FAIL;
+
+ if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ return YAFFS_FAIL;
+
+ dev = obj->my_dev;
+
+ /* Bail out if not yaffs2 mode */
+ if (!dev->param.is_yaffs2)
+ return YAFFS_OK;
+
+ old_file_size = obj->variant.file_variant.file_size;
+
+ if (new_size <= old_file_size)
+ return YAFFS_OK;
+
+ increase = new_size - old_file_size;
+
+ if (increase < YAFFS_SMALL_HOLE_THRESHOLD * dev->data_bytes_per_chunk &&
+ yaffs_check_alloc_available(dev, YAFFS_SMALL_HOLE_THRESHOLD + 1))
+ small_hole = 1;
+ else
+ small_hole = 0;
+
+ if (small_hole)
+ local_buffer = yaffs_get_temp_buffer(dev);
+
+ if (local_buffer) {
+ /* fill hole with zero bytes */
+ loff_t pos = old_file_size;
+ int this_write;
+ int written;
+ memset(local_buffer, 0, dev->data_bytes_per_chunk);
+ small_increase_ok = 1;
+
+ while (increase > 0 && small_increase_ok) {
+ this_write = increase;
+ if (this_write > dev->data_bytes_per_chunk)
+ this_write = dev->data_bytes_per_chunk;
+ written =
+ yaffs_do_file_wr(obj, local_buffer, pos, this_write,
+ 0);
+ if (written == this_write) {
+ pos += this_write;
+ increase -= this_write;
+ } else {
+ small_increase_ok = 0;
+ }
+ }
+
+ yaffs_release_temp_buffer(dev, local_buffer);
+
+ /* If out of space then reverse any chunks we've added */
+ if (!small_increase_ok)
+ yaffs_resize_file_down(obj, old_file_size);
+ }
+
+ if (!small_increase_ok &&
+ obj->parent &&
+ obj->parent->obj_id != YAFFS_OBJECTID_UNLINKED &&
+ obj->parent->obj_id != YAFFS_OBJECTID_DELETED) {
+ /* Write a hole start header with the old file size */
+ yaffs_update_oh(obj, NULL, 0, 1, 0, NULL);
+ }
+
+ return result;
+}
+
+struct yaffs_block_index {
+ int seq;
+ int block;
+};
+
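+/* Comparator for sort(): order blocks by ascending sequence number,
+ * breaking ties with the block number. */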
+static int yaffs2_ybicmp(const void *a, const void *b)
+{
+ int aseq = ((struct yaffs_block_index *)a)->seq;
+ int bseq = ((struct yaffs_block_index *)b)->seq;
+ int ablock = ((struct yaffs_block_index *)a)->block;
+ int bblock = ((struct yaffs_block_index *)b)->block;
+
+ if (aseq == bseq)
+ return ablock - bblock;
+
+ return aseq - bseq;
+}
+
+static inline int yaffs2_scan_chunk(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi,
+ int blk, int chunk_in_block,
+ int *found_chunks,
+ u8 *chunk_data,
+ struct list_head *hard_list,
+ int summary_available)
+{
+ struct yaffs_obj_hdr *oh;
+ struct yaffs_obj *in;
+ struct yaffs_obj *parent;
+ int equiv_id;
+ loff_t file_size;
+ int is_shrink;
+ int is_unlinked;
+ struct yaffs_ext_tags tags;
+ int alloc_failed = 0;
+ int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
+ struct yaffs_file_var *file_var;
+ struct yaffs_hardlink_var *hl_var;
+ struct yaffs_symlink_var *sl_var;
+
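+ /* Prefer the block summary when one was read successfully; otherwise,
+ * or when the fetched entry carries no object id, fall back to reading
+ * the per-chunk tags from NAND. tags_used/summary_used count which
+ * path was taken. */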
+ if (summary_available) {
+ yaffs_summary_fetch(dev, &tags, chunk_in_block);
+ tags.seq_number = bi->seq_number;
+ }
+
+ if (!summary_available || tags.obj_id == 0) {
+ yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+ dev->tags_used++;
+ } else {
+ dev->summary_used++;
+ }
+
+ /* Let's have a good look at this chunk... */
+
+ if (!tags.chunk_used) {
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing
+ * the erased check. Just skip it so that it can
+ * be deleted.
+ * But, more typically, we get here when this is
+ * an unallocated chunk and this means that
+ * either the block is empty or this is the one
+ * being allocated from
+ */
+
+ if (*found_chunks) {
+ /* This is a chunk that was skipped due
+ * to failing the erased check */
+ } else if (chunk_in_block == 0) {
+ /* We're looking at the first chunk in
+ * the block so the block is unused */
+ bi->block_state = YAFFS_BLOCK_STATE_EMPTY;
+ dev->n_erased_blocks++;
+ } else {
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if (dev->seq_number == bi->seq_number) {
+ /* Allocating from this block*/
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Allocating from %d %d",
+ blk, chunk_in_block);
+
+ bi->block_state =
+ YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->alloc_block = blk;
+ dev->alloc_page = chunk_in_block;
+ dev->alloc_block_finder = blk;
+ } else {
+ /* This is a partially written block
+ * that is not the current
+ * allocation block.
+ */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Partially written block %d detected. gc will fix this.",
+ blk);
+ }
+ }
+ }
+
+ dev->n_free_chunks++;
+
+ } else if (tags.ecc_result ==
+ YAFFS_ECC_RESULT_UNFIXED) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ " Unfixed ECC in chunk(%d:%d), chunk ignored",
+ blk, chunk_in_block);
+ dev->n_free_chunks++;
+ } else if (tags.obj_id > YAFFS_MAX_OBJECT_ID ||
+ tags.chunk_id > YAFFS_MAX_CHUNK_ID ||
+ tags.obj_id == YAFFS_OBJECTID_SUMMARY ||
+ (tags.chunk_id > 0 &&
+ tags.n_bytes > dev->data_bytes_per_chunk) ||
+ tags.seq_number != bi->seq_number) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Chunk (%d:%d) with bad tags:obj = %d, chunk_id = %d, n_bytes = %d, ignored",
+ blk, chunk_in_block, tags.obj_id,
+ tags.chunk_id, tags.n_bytes);
+ dev->n_free_chunks++;
+ } else if (tags.chunk_id > 0) {
+ /* chunk_id > 0 so it is a data chunk... */
+ loff_t endpos;
+ loff_t chunk_base = (tags.chunk_id - 1) *
+ dev->data_bytes_per_chunk;
+
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ YAFFS_OBJECT_TYPE_FILE);
+ if (!in)
+ /* Out of memory */
+ alloc_failed = 1;
+
+ if (in &&
+ in->variant_type == YAFFS_OBJECT_TYPE_FILE &&
+ chunk_base < in->variant.file_variant.shrink_size) {
+ /* This has not been invalidated by
+ * a resize */
+ if (!yaffs_put_chunk_in_file(in, tags.chunk_id,
+ chunk, -1))
+ alloc_failed = 1;
+
+ /* File size is calculated by looking at
+ * the data chunks if we have not
+ * seen an object header yet.
+ * Stop this practice once we find an
+ * object header.
+ */
+ endpos = chunk_base + tags.n_bytes;
+
+ if (!in->valid &&
+ in->variant.file_variant.scanned_size < endpos) {
+ in->variant.file_variant.
+ scanned_size = endpos;
+ in->variant.file_variant.
+ file_size = endpos;
+ }
+ } else if (in) {
+ /* This chunk has been invalidated by a
+ * resize, or a past file deletion
+ * so delete the chunk*/
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+ } else {
+ /* chunk_id == 0, so it is an ObjectHeader.
+ * Thus, we read in the object header and make
+ * the object
+ */
+ *found_chunks = 1;
+
+ yaffs_set_chunk_bit(dev, blk, chunk_in_block);
+ bi->pages_in_use++;
+
+ oh = NULL;
+ in = NULL;
+
+ if (tags.extra_available) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id,
+ tags.extra_obj_type);
+ if (!in)
+ alloc_failed = 1;
+ }
+
+ if (!in ||
+ (!in->valid && dev->param.disable_lazy_load) ||
+ tags.extra_shadows ||
+ (!in->valid && (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND))) {
+
+ /* If we don't have valid info then we
+ * need to read the chunk
+ * TODO In future we can probably defer
+ * reading the chunk and living with
+ * invalid data until needed.
+ */
+
+ yaffs_rd_chunk_tags_nand(dev, chunk, chunk_data, NULL);
+
+ oh = (struct yaffs_obj_hdr *)chunk_data;
+
+ if (dev->param.inband_tags) {
+ /* Fix up the header if it got
+ * corrupted by inband tags */
+ oh->shadows_obj =
+ oh->inband_shadowed_obj_id;
+ oh->is_shrink =
+ oh->inband_is_shrink;
+ }
+
+ if (!in) {
+ in = yaffs_find_or_create_by_number(dev,
+ tags.obj_id, oh->type);
+ if (!in)
+ alloc_failed = 1;
+ }
+ }
+
+ if (!in) {
+ /* TODO Hoosterman we have a problem! */
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Could not make object for object %d at chunk %d during scan",
+ tags.obj_id, chunk);
+ return YAFFS_FAIL;
+ }
+
+ if (in->valid) {
+ /* We have already filled this one.
+ * We have a duplicate that will be
+ * discarded, but we first have to suck
+ * out resize info if it is a file.
+ */
+ if ((in->variant_type == YAFFS_OBJECT_TYPE_FILE) &&
+ ((oh && oh->type == YAFFS_OBJECT_TYPE_FILE) ||
+ (tags.extra_available &&
+ tags.extra_obj_type == YAFFS_OBJECT_TYPE_FILE)
+ )) {
+ loff_t this_size = (oh) ?
+ yaffs_oh_to_size(oh) :
+ tags.extra_file_size;
+ u32 parent_obj_id = (oh) ?
+ oh->parent_obj_id :
+ tags.extra_parent_id;
+
+ is_shrink = (oh) ?
+ oh->is_shrink :
+ tags.extra_is_shrink;
+
+ /* If it is deleted (unlinked
+ * at start also means deleted)
+ * we treat the file size as
+ * being zeroed at this point.
+ */
+ if (parent_obj_id == YAFFS_OBJECTID_DELETED ||
+ parent_obj_id == YAFFS_OBJECTID_UNLINKED) {
+ this_size = 0;
+ is_shrink = 1;
+ }
+
+ if (is_shrink &&
+ in->variant.file_variant.shrink_size >
+ this_size)
+ in->variant.file_variant.shrink_size =
+ this_size;
+
+ if (is_shrink)
+ bi->has_shrink_hdr = 1;
+ }
+ /* Use existing - destroy this one. */
+ yaffs_chunk_del(dev, chunk, 1, __LINE__);
+ }
+
+ if (!in->valid && in->variant_type !=
+ (oh ? oh->type : tags.extra_obj_type))
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: Bad object type, %d != %d, for object %d at chunk %d during scan",
+ oh ? oh->type : tags.extra_obj_type,
+ in->variant_type, tags.obj_id,
+ chunk);
+
+ if (!in->valid &&
+ (tags.obj_id == YAFFS_OBJECTID_ROOT ||
+ tags.obj_id == YAFFS_OBJECTID_LOSTNFOUND)) {
+ /* We only load some info, don't fiddle
+ * with directory structure */
+ in->valid = 1;
+
+ if (oh) {
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+ in->lazy_loaded = 0;
+ } else {
+ in->lazy_loaded = 1;
+ }
+ in->hdr_chunk = chunk;
+
+ } else if (!in->valid) {
+ /* we need to load this info */
+ in->valid = 1;
+ in->hdr_chunk = chunk;
+ if (oh) {
+ in->variant_type = oh->type;
+ in->yst_mode = oh->yst_mode;
+ yaffs_load_attribs(in, oh);
+
+ if (oh->shadows_obj > 0)
+ yaffs_handle_shadowed_obj(dev,
+ oh->shadows_obj, 1);
+
+ yaffs_set_obj_name_from_oh(in, oh);
+ parent = yaffs_find_or_create_by_number(dev,
+ oh->parent_obj_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = yaffs_oh_to_size(oh);
+ is_shrink = oh->is_shrink;
+ equiv_id = oh->equiv_id;
+ } else {
+ in->variant_type = tags.extra_obj_type;
+ parent = yaffs_find_or_create_by_number(dev,
+ tags.extra_parent_id,
+ YAFFS_OBJECT_TYPE_DIRECTORY);
+ file_size = tags.extra_file_size;
+ is_shrink = tags.extra_is_shrink;
+ equiv_id = tags.extra_equiv_id;
+ in->lazy_loaded = 1;
+ }
+ in->dirty = 0;
+
+ if (!parent)
+ alloc_failed = 1;
+
+ /* directory stuff...
+ * hook up to parent
+ */
+
+ if (parent &&
+ parent->variant_type == YAFFS_OBJECT_TYPE_UNKNOWN) {
+ /* Set up as a directory */
+ parent->variant_type =
+ YAFFS_OBJECT_TYPE_DIRECTORY;
+ INIT_LIST_HEAD(&parent->
+ variant.dir_variant.children);
+ } else if (!parent ||
+ parent->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /* Hoosterman, another problem....
+ * Trying to use a non-directory as a directory
+ */
+
+ yaffs_trace(YAFFS_TRACE_ERROR,
+ "yaffs tragedy: attempting to use non-directory as a directory in scan. Put in lost+found."
+ );
+ parent = dev->lost_n_found;
+ }
+ yaffs_add_obj_to_dir(parent, in);
+
+ is_unlinked = (parent == dev->del_dir) ||
+ (parent == dev->unlinked_dir);
+
+ if (is_shrink)
+ /* Mark the block */
+ bi->has_shrink_hdr = 1;
+
+ /* Note re hardlinks.
+ * Since we might scan a hardlink before its equivalent
+ * object is scanned we put them all in a list.
+ * After scanning is complete, we should have all the
+ * objects, so we run through this list and fix up all
+ * the chains.
+ */
+
+ switch (in->variant_type) {
+ case YAFFS_OBJECT_TYPE_UNKNOWN:
+ /* Todo got a problem */
+ break;
+ case YAFFS_OBJECT_TYPE_FILE:
+ file_var = &in->variant.file_variant;
+ if (file_var->scanned_size < file_size) {
+ /* This covers the case where the file
+ * size is greater than the data held.
+ * This will happen if the file is
+ * resized to be larger than its
+ * current data extents.
+ */
+ file_var->file_size = file_size;
+ file_var->scanned_size = file_size;
+ }
+
+ if (file_var->shrink_size > file_size)
+ file_var->shrink_size = file_size;
+
+ break;
+ case YAFFS_OBJECT_TYPE_HARDLINK:
+ hl_var = &in->variant.hardlink_variant;
+ if (!is_unlinked) {
+ hl_var->equiv_id = equiv_id;
+ list_add(&in->hard_links, hard_list);
+ }
+ break;
+ case YAFFS_OBJECT_TYPE_DIRECTORY:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SPECIAL:
+ /* Do nothing */
+ break;
+ case YAFFS_OBJECT_TYPE_SYMLINK:
+ sl_var = &in->variant.symlink_variant;
+ if (oh) {
+ sl_var->alias =
+ yaffs_clone_str(oh->alias);
+ if (!sl_var->alias)
+ alloc_failed = 1;
+ }
+ break;
+ }
+ }
+ }
+ return alloc_failed ? YAFFS_FAIL : YAFFS_OK;
+}
+
+int yaffs2_scan_backwards(struct yaffs_dev *dev)
+{
+ int blk;
+ int block_iter;
+ int start_iter;
+ int end_iter;
+ int n_to_scan = 0;
+ enum yaffs_block_state state;
+ int c;
+ LIST_HEAD(hard_list);
+ struct yaffs_block_info *bi;
+ u32 seq_number;
+ int n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
+ u8 *chunk_data;
+ int found_chunks;
+ int alloc_failed = 0;
+ struct yaffs_block_index *block_index = NULL;
+ int alt_block_index = 0;
+ int summary_available;
+
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards starts intstartblk %d intendblk %d...",
+ dev->internal_start_block, dev->internal_end_block);
+
+ dev->seq_number = YAFFS_LOWEST_SEQUENCE_NUMBER;
+
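+ /* Try a physically contiguous kmalloc first and fall back to vmalloc
+ * for large partitions; alt_block_index records which allocator the
+ * index must be freed with after the scan. */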
+ block_index =
+ kmalloc(n_blocks * sizeof(struct yaffs_block_index), GFP_NOFS);
+
+ if (!block_index) {
+ block_index =
+ vmalloc(n_blocks * sizeof(struct yaffs_block_index));
+ alt_block_index = 1;
+ }
+
+ if (!block_index) {
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "yaffs2_scan_backwards() could not allocate block index!"
+ );
+ return YAFFS_FAIL;
+ }
+
+ dev->blocks_in_checkpt = 0;
+
+ chunk_data = yaffs_get_temp_buffer(dev);
+
+ /* Scan all the blocks to determine their state */
+ bi = dev->block_info;
+ for (blk = dev->internal_start_block; blk <= dev->internal_end_block;
+ blk++) {
+ yaffs_clear_chunk_bits(dev, blk);
+ bi->pages_in_use = 0;
+ bi->soft_del_pages = 0;
+
+ yaffs_query_init_block_state(dev, blk, &state, &seq_number);
+
+ bi->block_state = state;
+ bi->seq_number = seq_number;
+
+ if (bi->seq_number == YAFFS_SEQUENCE_CHECKPOINT_DATA)
+ bi->block_state = YAFFS_BLOCK_STATE_CHECKPOINT;
+ if (bi->seq_number == YAFFS_SEQUENCE_BAD_BLOCK)
+ bi->block_state = YAFFS_BLOCK_STATE_DEAD;
+
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG,
+ "Block scanning block %d state %d seq %d",
+ blk, bi->block_state, seq_number);
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT) {
+ dev->blocks_in_checkpt++;
+
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_DEAD) {
+ yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
+ "block %d is bad", blk);
+ } else if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) {
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "Block empty ");
+ dev->n_erased_blocks++;
+ dev->n_free_chunks += dev->param.chunks_per_block;
+ } else if (bi->block_state ==
+ YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* Determine the highest sequence number */
+ if (seq_number >= YAFFS_LOWEST_SEQUENCE_NUMBER &&
+ seq_number < YAFFS_HIGHEST_SEQUENCE_NUMBER) {
+ block_index[n_to_scan].seq = seq_number;
+ block_index[n_to_scan].block = blk;
+ n_to_scan++;
+ if (seq_number >= dev->seq_number)
+ dev->seq_number = seq_number;
+ } else {
+ /* TODO: Nasty sequence number! */
+ yaffs_trace(YAFFS_TRACE_SCAN,
+ "Block scanning block %d has bad sequence number %d",
+ blk, seq_number);
+ }
+ }
+ bi++;
+ }
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "%d blocks to be sorted...", n_to_scan);
+
+ cond_resched();
+
+ /* Sort the blocks by sequence number */
+ sort(block_index, n_to_scan, sizeof(struct yaffs_block_index),
+ yaffs2_ybicmp, NULL);
+
+ cond_resched();
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "...done");
+
+ /* Now scan the blocks looking at the data. */
+ start_iter = 0;
+ end_iter = n_to_scan - 1;
+ yaffs_trace(YAFFS_TRACE_SCAN_DEBUG, "%d blocks to scan", n_to_scan);
+
+ /* For each block.... backwards */
+ for (block_iter = end_iter;
+ !alloc_failed && block_iter >= start_iter;
+ block_iter--) {
+ /* Cooperative multitasking! This loop can run for so
+ long that watchdog timers expire. */
+ cond_resched();
+
+ /* get the block to scan in the correct order */
+ blk = block_index[block_iter].block;
+ bi = yaffs_get_block_info(dev, blk);
+
+ summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);
+
+ /* For each chunk in each block that needs scanning.... */
+ found_chunks = 0;
+ if (summary_available)
+ c = dev->chunks_per_summary - 1;
+ else
+ c = dev->param.chunks_per_block - 1;
+
+ for (/* c is already initialised */;
+ !alloc_failed && c >= 0 &&
+ (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN ||
+ bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING);
+ c--) {
+ /* Scan backwards...
+ * Read the tags and decide what to do
+ */
+ if (yaffs2_scan_chunk(dev, bi, blk, c,
+ &found_chunks, chunk_data,
+ &hard_list, summary_available) ==
+ YAFFS_FAIL)
+ alloc_failed = 1;
+ }
+
+ if (bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN) {
+ /* If we got this far while scanning, then the block
+ * is fully allocated. */
+ bi->block_state = YAFFS_BLOCK_STATE_FULL;
+ }
+
+ /* Now let's see if it was dirty */
+ if (bi->pages_in_use == 0 &&
+ !bi->has_shrink_hdr &&
+ bi->block_state == YAFFS_BLOCK_STATE_FULL) {
+ yaffs_block_became_dirty(dev, blk);
+ }
+ }
+
+ yaffs_skip_rest_of_block(dev);
+
+ if (alt_block_index)
+ vfree(block_index);
+ else
+ kfree(block_index);
+
+ /* Ok, we've done all the scanning.
+ * Fix up the hard link chains.
+ * We have scanned all the objects, so now it's time to add these
+ * hardlinks.
+ */
+ yaffs_link_fixup(dev, &hard_list);
+
+ yaffs_release_temp_buffer(dev, chunk_data);
+
+ if (alloc_failed)
+ return YAFFS_FAIL;
+
+ yaffs_trace(YAFFS_TRACE_SCAN, "yaffs2_scan_backwards ends");
+
+ return YAFFS_OK;
+}
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.h b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.h
new file mode 100644
index 000000000..2363bfd8b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffs_yaffs2.h
@@ -0,0 +1,39 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+#ifndef __YAFFS_YAFFS2_H__
+#define __YAFFS_YAFFS2_H__
+
+#include "yaffs_guts.h"
+
+void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_find_oldest_dirty_seq(struct yaffs_dev *dev);
+void yaffs2_clear_oldest_dirty_seq(struct yaffs_dev *dev,
+ struct yaffs_block_info *bi);
+void yaffs2_update_oldest_dirty_seq(struct yaffs_dev *dev, unsigned block_no,
+ struct yaffs_block_info *bi);
+int yaffs_block_ok_for_gc(struct yaffs_dev *dev, struct yaffs_block_info *bi);
+u32 yaffs2_find_refresh_block(struct yaffs_dev *dev);
+int yaffs2_checkpt_required(struct yaffs_dev *dev);
+int yaffs_calc_checkpt_blocks_required(struct yaffs_dev *dev);
+
+void yaffs2_checkpt_invalidate(struct yaffs_dev *dev);
+int yaffs2_checkpt_save(struct yaffs_dev *dev);
+int yaffs2_checkpt_restore(struct yaffs_dev *dev);
+
+int yaffs2_handle_hole(struct yaffs_obj *obj, loff_t new_size);
+int yaffs2_scan_backwards(struct yaffs_dev *dev);
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffscfg.h b/qemu/roms/u-boot/fs/yaffs2/yaffscfg.h
new file mode 100644
index 000000000..718504eea
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffscfg.h
@@ -0,0 +1,38 @@
+/*
+ * YAFFS: Yet another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Header file for using yaffs in an application via
+ * a direct interface.
+ */
+
+
+#ifndef __YAFFSCFG_H__
+#define __YAFFSCFG_H__
+
+
+#include "yportenv.h"
+
+#define YAFFSFS_N_HANDLES 100
+#define YAFFSFS_N_DSC 20
+
+
+struct yaffsfs_DeviceConfiguration {
+ const YCHAR *prefix;
+ struct yaffs_dev *dev;
+};
+
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffsfs.c b/qemu/roms/u-boot/fs/yaffs2/yaffsfs.c
new file mode 100644
index 000000000..334598eed
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffsfs.c
@@ -0,0 +1,3217 @@
+/*
+ * YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <div64.h>
+#include "yaffsfs.h"
+#include "yaffs_guts.h"
+#include "yaffscfg.h"
+#include "yportenv.h"
+#include "yaffs_trace.h"
+
+#define YAFFSFS_MAX_SYMLINK_DEREFERENCES 5
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+/* YAFFSFS_RW_SIZE must be a power of 2 */
+#define YAFFSFS_RW_SHIFT (13)
+#define YAFFSFS_RW_SIZE (1<<YAFFSFS_RW_SHIFT)
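+/* With YAFFSFS_RW_SHIFT at 13, YAFFSFS_RW_SIZE works out to 8 KiB per transfer. */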
+
+/* Some forward references */
+static struct yaffs_obj *yaffsfs_FindObject(struct yaffs_obj *relativeDirectory,
+ const YCHAR *path,
+ int symDepth, int getEquiv,
+ struct yaffs_obj **dirOut,
+ int *notDir, int *loop);
+
+static void yaffsfs_RemoveObjectCallback(struct yaffs_obj *obj);
+
+unsigned int yaffs_wr_attempts;
+
+/*
+ * Handle management.
+ * There are open inodes in struct yaffsfs_Inode.
+ * There are open file descriptors in yaffsfs_FileDes.
+ * There are open handles in yaffsfs_Handle.
+ *
+ * Things are structured this way to be like the Linux VFS model
+ * so that interactions with the yaffs guts calls are similar.
+ * That means more common code paths and less special code.
+ * That means better testing etc.
+ *
+ * We have 3 layers because:
+ * A handle is different from an fd because you can use dup()
+ * to create a new handle that accesses the *same* fd. The two
+ * handles will use the same offset (part of the fd). We only close
+ * down the fd when there are no more handles accessing it.
+ *
+ * More than one fd can currently access one file, but each fd
+ * has its own permissions and offset.
+ */
+
+struct yaffsfs_Inode {
+ int count; /* Number of handles accessing this inode */
+ struct yaffs_obj *iObj;
+};
+
+struct yaffsfs_FileDes {
+ u8 reading:1;
+ u8 writing:1;
+ u8 append:1;
+ u8 shareRead:1;
+ u8 shareWrite:1;
+ int inodeId:12; /* Index to corresponding yaffsfs_Inode */
+ int handleCount:10; /* Number of handles for this fd */
+ loff_t position; /* current position in file */
+};
+
+struct yaffsfs_Handle {
+ short int fdId;
+ short int useCount;
+};
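+
+/*
+ * Usage sketch of the three-layer scheme described above (paths are
+ * hypothetical; "/flash" is assumed to be a mounted yaffs device):
+ *
+ *   int h1 = yaffs_open("/flash/log", O_RDONLY, 0);
+ *   int h2 = yaffs_dup(h1);           h1 and h2 share one fd
+ *   char buf[16];
+ *
+ *   yaffs_read(h1, buf, 8);           shared offset advances to 8
+ *   yaffs_read(h2, buf, 8);           continues from 8, not from 0
+ *
+ *   yaffs_close(h1);                  fd stays open, h2 still uses it
+ *   yaffs_close(h2);                  last handle: fd and inode released
+ */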
+
+
+struct yaffsfs_DirSearchContxt {
+ struct yaffs_dirent de; /* directory entry */
+ YCHAR name[NAME_MAX + 1]; /* name of directory being searched */
+ struct yaffs_obj *dirObj; /* ptr to directory being searched */
+ struct yaffs_obj *nextReturn; /* obj returned by next readdir */
+ struct list_head others;
+ int offset:20;
+ unsigned inUse:1;
+};
+
+static struct yaffsfs_DirSearchContxt yaffsfs_dsc[YAFFSFS_N_DSC];
+static struct yaffsfs_Inode yaffsfs_inode[YAFFSFS_N_HANDLES];
+static struct yaffsfs_FileDes yaffsfs_fd[YAFFSFS_N_HANDLES];
+static struct yaffsfs_Handle yaffsfs_handle[YAFFSFS_N_HANDLES];
+
+static int yaffsfs_handlesInitialised;
+
+unsigned yaffs_set_trace(unsigned tm)
+{
+ yaffs_trace_mask = tm;
+ return yaffs_trace_mask;
+}
+
+unsigned yaffs_get_trace(void)
+{
+ return yaffs_trace_mask;
+}
+
+/*
+ * yaffsfs_InitHandles
+ * Initialise handle management on start-up.
+ */
+
+static void yaffsfs_InitHandles(void)
+{
+ int i;
+ if (yaffsfs_handlesInitialised)
+ return;
+
+ memset(yaffsfs_inode, 0, sizeof(yaffsfs_inode));
+ memset(yaffsfs_fd, 0, sizeof(yaffsfs_fd));
+ memset(yaffsfs_handle, 0, sizeof(yaffsfs_handle));
+ memset(yaffsfs_dsc, 0, sizeof(yaffsfs_dsc));
+
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++)
+ yaffsfs_fd[i].inodeId = -1;
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++)
+ yaffsfs_handle[i].fdId = -1;
+}
+
+static struct yaffsfs_Handle *yaffsfs_HandleToPointer(int h)
+{
+ if (h >= 0 && h < YAFFSFS_N_HANDLES)
+ return &yaffsfs_handle[h];
+ return NULL;
+}
+
+static struct yaffsfs_FileDes *yaffsfs_HandleToFileDes(int handle)
+{
+ struct yaffsfs_Handle *h = yaffsfs_HandleToPointer(handle);
+
+ if (h && h->useCount > 0 && h->fdId >= 0 && h->fdId < YAFFSFS_N_HANDLES)
+ return &yaffsfs_fd[h->fdId];
+
+ return NULL;
+}
+
+static struct yaffsfs_Inode *yaffsfs_HandleToInode(int handle)
+{
+ struct yaffsfs_FileDes *fd = yaffsfs_HandleToFileDes(handle);
+
+ if (fd && fd->handleCount > 0 &&
+ fd->inodeId >= 0 && fd->inodeId < YAFFSFS_N_HANDLES)
+ return &yaffsfs_inode[fd->inodeId];
+
+ return NULL;
+}
+
+static struct yaffs_obj *yaffsfs_HandleToObject(int handle)
+{
+ struct yaffsfs_Inode *in = yaffsfs_HandleToInode(handle);
+
+ if (in)
+ return in->iObj;
+
+ return NULL;
+}
+
+/*
+ * yaffsfs_FindInodeIdForObject
+ * Find the inode entry for an object, if it exists.
+ */
+
+static int yaffsfs_FindInodeIdForObject(struct yaffs_obj *obj)
+{
+ int i;
+ int ret = -1;
+
+ if (obj)
+ obj = yaffs_get_equivalent_obj(obj);
+
+ /* Look for it in open inode table */
+ for (i = 0; i < YAFFSFS_N_HANDLES && ret < 0; i++) {
+ if (yaffsfs_inode[i].iObj == obj)
+ ret = i;
+ }
+ return ret;
+}
+
+/*
+ * yaffsfs_GetInodeIdForObject
+ * Grab an inode entry when opening a new inode.
+ */
+static int yaffsfs_GetInodeIdForObject(struct yaffs_obj *obj)
+{
+ int i;
+ int ret;
+ struct yaffsfs_Inode *in = NULL;
+
+ if (obj)
+ obj = yaffs_get_equivalent_obj(obj);
+
+ ret = yaffsfs_FindInodeIdForObject(obj);
+
+ for (i = 0; i < YAFFSFS_N_HANDLES && ret < 0; i++) {
+ if (!yaffsfs_inode[i].iObj)
+ ret = i;
+ }
+
+ if (ret >= 0) {
+ in = &yaffsfs_inode[ret];
+ if (!in->iObj)
+ in->count = 0;
+ in->iObj = obj;
+ in->count++;
+ }
+
+ return ret;
+}
+
+static int yaffsfs_CountHandles(struct yaffs_obj *obj)
+{
+ int i = yaffsfs_FindInodeIdForObject(obj);
+
+ if (i >= 0)
+ return yaffsfs_inode[i].count;
+ else
+ return 0;
+}
+
+static void yaffsfs_ReleaseInode(struct yaffsfs_Inode *in)
+{
+ struct yaffs_obj *obj;
+
+ obj = in->iObj;
+
+ if (obj->unlinked)
+ yaffs_del_obj(obj);
+
+ obj->my_inode = NULL;
+ in->iObj = NULL;
+
+}
+
+static void yaffsfs_PutInode(int inodeId)
+{
+ if (inodeId >= 0 && inodeId < YAFFSFS_N_HANDLES) {
+ struct yaffsfs_Inode *in = &yaffsfs_inode[inodeId];
+ in->count--;
+ if (in->count <= 0) {
+ yaffsfs_ReleaseInode(in);
+ in->count = 0;
+ }
+ }
+}
+
+static int yaffsfs_NewHandle(struct yaffsfs_Handle **hptr)
+{
+ int i;
+ struct yaffsfs_Handle *h;
+
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++) {
+ h = &yaffsfs_handle[i];
+ if (h->useCount < 1) {
+ memset(h, 0, sizeof(struct yaffsfs_Handle));
+ h->fdId = -1;
+ h->useCount = 1;
+ if (hptr)
+ *hptr = h;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static int yaffsfs_NewHandleAndFileDes(void)
+{
+ int i;
+ struct yaffsfs_FileDes *fd;
+ struct yaffsfs_Handle *h = NULL;
+ int handle = yaffsfs_NewHandle(&h);
+
+ if (handle < 0)
+ return -1;
+
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++) {
+ fd = &yaffsfs_fd[i];
+ if (fd->handleCount < 1) {
+ memset(fd, 0, sizeof(struct yaffsfs_FileDes));
+ fd->inodeId = -1;
+ fd->handleCount = 1;
+ h->fdId = i;
+ return handle;
+ }
+ }
+
+ /* Dump the handle because we could not get a fd */
+ h->useCount = 0;
+ return -1;
+}
+
+/*
+ * yaffsfs_GetHandle
+ * Increase use of handle when reading/writing a file
+ * Also gets the file descriptor.
+ */
+
+static int yaffsfs_GetHandle(int handle)
+{
+ struct yaffsfs_Handle *h = yaffsfs_HandleToPointer(handle);
+
+ if (h && h->useCount > 0) {
+ h->useCount++;
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ * yaffsfs_PutHandle
+ * Let go of a handle when closing a file or aborting an open or
+ * ending a read or write.
+ */
+
+static int yaffsfs_PutFileDes(int fdId)
+{
+ struct yaffsfs_FileDes *fd;
+
+ if (fdId >= 0 && fdId < YAFFSFS_N_HANDLES) {
+ fd = &yaffsfs_fd[fdId];
+ fd->handleCount--;
+ if (fd->handleCount < 1) {
+ if (fd->inodeId >= 0) {
+ yaffsfs_PutInode(fd->inodeId);
+ fd->inodeId = -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int yaffsfs_PutHandle(int handle)
+{
+ struct yaffsfs_Handle *h = yaffsfs_HandleToPointer(handle);
+
+ if (h && h->useCount > 0) {
+ h->useCount--;
+ if (h->useCount < 1) {
+ yaffsfs_PutFileDes(h->fdId);
+ h->fdId = -1;
+ }
+ }
+
+ return 0;
+}
+
+static void yaffsfs_BreakDeviceHandles(struct yaffs_dev *dev)
+{
+ struct yaffsfs_FileDes *fd;
+ struct yaffsfs_Handle *h;
+ struct yaffs_obj *obj;
+ int i;
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++) {
+ h = yaffsfs_HandleToPointer(i);
+ fd = yaffsfs_HandleToFileDes(i);
+ obj = yaffsfs_HandleToObject(i);
+ if (h && h->useCount > 0) {
+ h->useCount = 0;
+ h->fdId = 0;
+ }
+ if (fd && fd->handleCount > 0 && obj && obj->my_dev == dev) {
+ fd->handleCount = 0;
+ yaffsfs_PutInode(fd->inodeId);
+ fd->inodeId = -1;
+ }
+ }
+}
+
+/*
+ * Stuff to handle names.
+ */
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+
+static int yaffs_toupper(YCHAR a)
+{
+ if (a >= 'a' && a <= 'z')
+ return (a - 'a') + 'A';
+ else
+ return a;
+}
+
+int yaffsfs_Match(YCHAR a, YCHAR b)
+{
+ return (yaffs_toupper(a) == yaffs_toupper(b));
+}
+#else
+int yaffsfs_Match(YCHAR a, YCHAR b)
+{
+ /* case sensitive */
+ return (a == b);
+}
+#endif
+
+int yaffsfs_IsPathDivider(YCHAR ch)
+{
+ const YCHAR *str = YAFFS_PATH_DIVIDERS;
+
+ while (*str) {
+ if (*str == ch)
+ return 1;
+ str++;
+ }
+
+ return 0;
+}
+
+int yaffsfs_CheckNameLength(const char *name)
+{
+ int retVal = 0;
+
+ int nameLength = yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH + 1);
+
+ if (nameLength == 0) {
+ yaffsfs_SetError(-ENOENT);
+ retVal = -1;
+ } else if (nameLength > YAFFS_MAX_NAME_LENGTH) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ retVal = -1;
+ }
+
+ return retVal;
+}
+
+static int yaffsfs_alt_dir_path(const YCHAR *path, YCHAR **ret_path)
+{
+ YCHAR *alt_path = NULL;
+ int path_length;
+ int i;
+
+ /*
+ * We don't have a definition for max path length.
+ * We will use 3 * max name length instead.
+ */
+ *ret_path = NULL;
+ path_length = yaffs_strnlen(path, (YAFFS_MAX_NAME_LENGTH + 1) * 3 + 1);
+
+ /* If the last character is a path divider, then we need to
+ * trim it back so that the name look-up works properly.
+ * eg. /foo/new_dir/ -> /foo/new_dir
+ * Curveball: Need to handle multiple path dividers:
+ * eg. /foo/sdfse///// -> /foo/sdfse
+ */
+ if (path_length > 0 && yaffsfs_IsPathDivider(path[path_length - 1])) {
+ alt_path = kmalloc(path_length + 1, 0);
+ if (!alt_path)
+ return -1;
+ yaffs_strcpy(alt_path, path);
+ for (i = path_length - 1;
+ i >= 0 && yaffsfs_IsPathDivider(alt_path[i]); i--)
+ alt_path[i] = (YCHAR) 0;
+ }
+ *ret_path = alt_path;
+ return 0;
+}
+
+LIST_HEAD(yaffsfs_deviceList);
+
+/*
+ * yaffsfs_FindDevice
+ * yaffsfs_FindRoot
+ * Scan the configuration list to find the device
+ * Curveballs: Should match paths that end in '/' too
+ * Curveball2: Might have "/x" and "/x/y". Need to return the longest match.
+ */
+static struct yaffs_dev *yaffsfs_FindDevice(const YCHAR *path,
+ YCHAR **restOfPath)
+{
+ struct list_head *cfg;
+ const YCHAR *leftOver;
+ const YCHAR *p;
+ struct yaffs_dev *retval = NULL;
+ struct yaffs_dev *dev = NULL;
+ int thisMatchLength;
+ int longestMatch = -1;
+ int matching;
+
+ /*
+ * Check all configs, choose the one that:
+ * 1) Actually matches a prefix (i.e. /a and /abc will not match)
+ * 2) Matches the longest.
+ */
+ list_for_each(cfg, &yaffsfs_deviceList) {
+ dev = list_entry(cfg, struct yaffs_dev, dev_list);
+ leftOver = path;
+ p = dev->param.name;
+ thisMatchLength = 0;
+ matching = 1;
+
+ while (matching && *p && *leftOver) {
+ /* Skip over any /s */
+ while (yaffsfs_IsPathDivider(*p))
+ p++;
+
+ /* Skip over any /s */
+ while (yaffsfs_IsPathDivider(*leftOver))
+ leftOver++;
+
+ /* Now match the text part */
+ while (matching &&
+ *p && !yaffsfs_IsPathDivider(*p) &&
+ *leftOver && !yaffsfs_IsPathDivider(*leftOver)) {
+ if (yaffsfs_Match(*p, *leftOver)) {
+ p++;
+ leftOver++;
+ thisMatchLength++;
+ } else {
+ matching = 0;
+ }
+ }
+ }
+
+ /* Skip over any /s in leftOver */
+ while (yaffsfs_IsPathDivider(*leftOver))
+ leftOver++;
+
+ /* Skip over any /s in p */
+ while (yaffsfs_IsPathDivider(*p))
+ p++;
+
+ /* p should now be at the end of the string if fully matched */
+ if (*p)
+ matching = 0;
+
+ if (matching && (thisMatchLength > longestMatch)) {
+ /* Matched prefix */
+ *restOfPath = (YCHAR *) leftOver;
+ retval = dev;
+ longestMatch = thisMatchLength;
+ }
+
+ }
+ return retval;
+}
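+
+/*
+ * Worked example (device names are hypothetical): with devices named
+ * "/x" and "/x/y" both registered, yaffsfs_FindDevice("/x/y/file")
+ * returns the "/x/y" device with *restOfPath pointing at "file", since
+ * that is the longest matching prefix, while yaffsfs_FindDevice("/x/zzz")
+ * returns the "/x" device with *restOfPath pointing at "zzz".
+ */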
+
+static int yaffsfs_CheckPath(const YCHAR *path)
+{
+ int n = 0;
+ int divs = 0;
+
+ while (*path && n < YAFFS_MAX_NAME_LENGTH && divs < 100) {
+ if (yaffsfs_IsPathDivider(*path)) {
+ n = 0;
+ divs++;
+ } else
+ n++;
+ path++;
+ }
+
+ return (*path) ? -1 : 0;
+}
+
+/* FindMountPoint only returns a dev entry if the path is a mount point */
+static struct yaffs_dev *yaffsfs_FindMountPoint(const YCHAR *path)
+{
+ struct yaffs_dev *dev;
+ YCHAR *restOfPath = NULL;
+
+ dev = yaffsfs_FindDevice(path, &restOfPath);
+ if (dev && restOfPath && *restOfPath)
+ dev = NULL;
+ return dev;
+}
+
+static struct yaffs_obj *yaffsfs_FindRoot(const YCHAR *path,
+ YCHAR **restOfPath)
+{
+ struct yaffs_dev *dev;
+
+ dev = yaffsfs_FindDevice(path, restOfPath);
+ if (dev && dev->is_mounted)
+ return dev->root_dir;
+
+ return NULL;
+}
+
+static struct yaffs_obj *yaffsfs_FollowLink(struct yaffs_obj *obj,
+ int symDepth, int *loop)
+{
+
+ if (obj)
+ obj = yaffs_get_equivalent_obj(obj);
+
+ while (obj && obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
+ YCHAR *alias = obj->variant.symlink_variant.alias;
+
+ if (yaffsfs_IsPathDivider(*alias))
+ /* Starts with a /, need to scan from root up */
+ obj = yaffsfs_FindObject(NULL, alias, symDepth++,
+ 1, NULL, NULL, loop);
+ else
+ /*
+ * Relative to here so use the parent of the
+ * symlink as a start
+ */
+ obj = yaffsfs_FindObject(obj->parent, alias, symDepth++,
+ 1, NULL, NULL, loop);
+ }
+ return obj;
+}
+
+/*
+ * yaffsfs_FindDirectory
+ * Parse a path to determine the directory and the name within the directory.
+ *
+ * eg. "/data/xx/ff" --> puts name="ff" and returns the directory "/data/xx"
+ */
+static struct yaffs_obj *yaffsfs_DoFindDirectory(struct yaffs_obj *startDir,
+ const YCHAR *path,
+ YCHAR **name, int symDepth,
+ int *notDir, int *loop)
+{
+ struct yaffs_obj *dir;
+ YCHAR *restOfPath;
+ YCHAR str[YAFFS_MAX_NAME_LENGTH + 1];
+ int i;
+
+ if (symDepth > YAFFSFS_MAX_SYMLINK_DEREFERENCES) {
+ if (loop)
+ *loop = 1;
+ return NULL;
+ }
+
+ if (startDir) {
+ dir = startDir;
+ restOfPath = (YCHAR *) path;
+ } else
+ dir = yaffsfs_FindRoot(path, &restOfPath);
+
+ while (dir) {
+ /*
+ * parse off /.
+ * curve ball: also throw away surplus '/'
+ * eg. "/ram/x////ff" gets treated the same as "/ram/x/ff"
+ */
+ while (yaffsfs_IsPathDivider(*restOfPath))
+ restOfPath++; /* get rid of '/' */
+
+ *name = restOfPath;
+ i = 0;
+
+ while (*restOfPath && !yaffsfs_IsPathDivider(*restOfPath)) {
+ if (i < YAFFS_MAX_NAME_LENGTH) {
+ str[i] = *restOfPath;
+ str[i + 1] = '\0';
+ i++;
+ }
+ restOfPath++;
+ }
+
+ if (!*restOfPath)
+ /* got to the end of the string */
+ return dir;
+ else {
+ if (yaffs_strcmp(str, _Y(".")) == 0) {
+ /* Do nothing */
+ } else if (yaffs_strcmp(str, _Y("..")) == 0) {
+ dir = dir->parent;
+ } else {
+ dir = yaffs_find_by_name(dir, str);
+
+ dir = yaffsfs_FollowLink(dir, symDepth, loop);
+
+ if (dir && dir->variant_type !=
+ YAFFS_OBJECT_TYPE_DIRECTORY) {
+ if (notDir)
+ *notDir = 1;
+ dir = NULL;
+ }
+
+ }
+ }
+ }
+ /* directory did not exist. */
+ return NULL;
+}
+
+static struct yaffs_obj *yaffsfs_FindDirectory(struct yaffs_obj *relDir,
+ const YCHAR *path,
+ YCHAR **name,
+ int symDepth,
+ int *notDir, int *loop)
+{
+ return yaffsfs_DoFindDirectory(relDir, path, name, symDepth, notDir,
+ loop);
+}
+
+/*
+ * yaffsfs_FindObject turns a path for an existing object into the object
+ */
+static struct yaffs_obj *yaffsfs_FindObject(struct yaffs_obj *relDir,
+ const YCHAR *path, int symDepth,
+ int getEquiv,
+ struct yaffs_obj **dirOut,
+ int *notDir, int *loop)
+{
+ struct yaffs_obj *dir;
+ struct yaffs_obj *obj;
+ YCHAR *name;
+
+ dir =
+ yaffsfs_FindDirectory(relDir, path, &name, symDepth, notDir, loop);
+
+ if (dirOut)
+ *dirOut = dir;
+
+ if (dir && *name)
+ obj = yaffs_find_by_name(dir, name);
+ else
+ obj = dir;
+
+ if (getEquiv)
+ obj = yaffs_get_equivalent_obj(obj);
+
+ return obj;
+}
+
+/*************************************************************************
+ * Start of yaffsfs visible functions.
+ *************************************************************************/
+
+int yaffs_dup(int handle)
+{
+ int newHandleNumber = -1;
+ struct yaffsfs_FileDes *existingFD = NULL;
+ struct yaffsfs_Handle *existingHandle = NULL;
+ struct yaffsfs_Handle *newHandle = NULL;
+
+ yaffsfs_Lock();
+ existingHandle = yaffsfs_HandleToPointer(handle);
+ existingFD = yaffsfs_HandleToFileDes(handle);
+ if (existingFD)
+ newHandleNumber = yaffsfs_NewHandle(&newHandle);
+ if (newHandle) {
+ newHandle->fdId = existingHandle->fdId;
+ existingFD->handleCount++;
+ }
+
+ yaffsfs_Unlock();
+
+ if (!existingFD)
+ yaffsfs_SetError(-EBADF);
+ else if (!newHandle)
+ yaffsfs_SetError(-ENOMEM);
+
+ return newHandleNumber;
+
+}
+
+static int yaffsfs_TooManyObjects(struct yaffs_dev *dev)
+{
+ int current_objects = dev->n_obj - dev->n_deleted_files;
+
+ if (dev->param.max_objects && current_objects > dev->param.max_objects)
+ return 1;
+ else
+ return 0;
+}
+
+int yaffs_open_sharing(const YCHAR *path, int oflag, int mode, int sharing)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ YCHAR *name;
+ int handle = -1;
+ struct yaffsfs_FileDes *fd = NULL;
+ int openDenied = 0;
+ int symDepth = 0;
+ int errorReported = 0;
+ int rwflags = oflag & (O_RDWR | O_RDONLY | O_WRONLY);
+ u8 shareRead = (sharing & YAFFS_SHARE_READ) ? 1 : 0;
+ u8 shareWrite = (sharing & YAFFS_SHARE_WRITE) ? 1 : 0;
+ u8 sharedReadAllowed;
+ u8 sharedWriteAllowed;
+ u8 alreadyReading;
+ u8 alreadyWriting;
+ u8 readRequested;
+ u8 writeRequested;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ /* O_EXCL only has meaning if O_CREAT is specified */
+ if (!(oflag & O_CREAT))
+ oflag &= ~(O_EXCL);
+
+ /* O_TRUNC has no meaning if (O_CREAT | O_EXCL) is specified */
+ if ((oflag & O_CREAT) && (oflag & O_EXCL))
+ oflag &= ~(O_TRUNC);
+
+ /* Todo: Are there any more flag combos to sanitise? */
+
+ /* Figure out if reading or writing is requested */
+
+ readRequested = (rwflags == O_RDWR || rwflags == O_RDONLY) ? 1 : 0;
+ writeRequested = (rwflags == O_RDWR || rwflags == O_WRONLY) ? 1 : 0;
+
+ yaffsfs_Lock();
+
+ handle = yaffsfs_NewHandleAndFileDes();
+
+ if (handle < 0) {
+ yaffsfs_SetError(-ENFILE);
+ errorReported = 1;
+ } else {
+
+ fd = yaffsfs_HandleToFileDes(handle);
+
+ /* try to find the existing object */
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, NULL, NULL, NULL);
+
+ obj = yaffsfs_FollowLink(obj, symDepth++, &loop);
+
+ if (obj &&
+ obj->variant_type != YAFFS_OBJECT_TYPE_FILE &&
+ obj->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ obj = NULL;
+
+ if (obj) {
+
+ /* The file already exists or it might be a directory */
+
+ /* A directory can't be opened as a file */
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+ openDenied = 1;
+ yaffsfs_SetError(-EISDIR);
+ errorReported = 1;
+ }
+
+ /* Open should fail if O_CREAT and O_EXCL are specified
+ * for a file that exists.
+ */
+ if (!errorReported &&
+ (oflag & O_EXCL) && (oflag & O_CREAT)) {
+ openDenied = 1;
+ yaffsfs_SetError(-EEXIST);
+ errorReported = 1;
+ }
+
+ /* Check file permissions */
+ if (readRequested && !(obj->yst_mode & S_IREAD))
+ openDenied = 1;
+
+ if (writeRequested && !(obj->yst_mode & S_IWRITE))
+ openDenied = 1;
+
+ if (!errorReported && writeRequested &&
+ obj->my_dev->read_only) {
+ openDenied = 1;
+ yaffsfs_SetError(-EROFS);
+ errorReported = 1;
+ }
+
+ if (openDenied && !errorReported) {
+ yaffsfs_SetError(-EACCES);
+ errorReported = 1;
+ }
+
+ /* Check sharing of an existing object. */
+ if (!openDenied) {
+ struct yaffsfs_FileDes *fdx;
+ int i;
+
+ sharedReadAllowed = 1;
+ sharedWriteAllowed = 1;
+ alreadyReading = 0;
+ alreadyWriting = 0;
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++) {
+ fdx = &yaffsfs_fd[i];
+ if (fdx->handleCount > 0 &&
+ fdx->inodeId >= 0 &&
+ yaffsfs_inode[fdx->inodeId].iObj
+ == obj) {
+ if (!fdx->shareRead)
+ sharedReadAllowed = 0;
+ if (!fdx->shareWrite)
+ sharedWriteAllowed = 0;
+ if (fdx->reading)
+ alreadyReading = 1;
+ if (fdx->writing)
+ alreadyWriting = 1;
+ }
+ }
+
+ if ((!sharedReadAllowed && readRequested) ||
+ (!shareRead && alreadyReading) ||
+ (!sharedWriteAllowed && writeRequested) ||
+ (!shareWrite && alreadyWriting)) {
+ openDenied = 1;
+ yaffsfs_SetError(-EBUSY);
+ errorReported = 1;
+ }
+ }
+
+ }
+
+ /* If we could not open an existing object, then let's see if
+ * the directory exists. If not, error.
+ */
+ if (!obj && !errorReported) {
+ dir = yaffsfs_FindDirectory(NULL, path, &name, 0,
+ &notDir, &loop);
+ if (!dir && notDir) {
+ yaffsfs_SetError(-ENOTDIR);
+ errorReported = 1;
+ } else if (loop) {
+ yaffsfs_SetError(-ELOOP);
+ errorReported = 1;
+ } else if (!dir) {
+ yaffsfs_SetError(-ENOENT);
+ errorReported = 1;
+ }
+ }
+
+ if (!obj && dir && !errorReported && (oflag & O_CREAT)) {
+ /* Let's see if we can create this file */
+ if (dir->my_dev->read_only) {
+ yaffsfs_SetError(-EROFS);
+ errorReported = 1;
+ } else if (yaffsfs_TooManyObjects(dir->my_dev)) {
+ yaffsfs_SetError(-ENFILE);
+ errorReported = 1;
+ } else
+ obj = yaffs_create_file(dir, name, mode, 0, 0);
+
+ if (!obj && !errorReported) {
+ yaffsfs_SetError(-ENOSPC);
+ errorReported = 1;
+ }
+ }
+
+ if (!obj && dir && !errorReported && !(oflag & O_CREAT)) {
+ yaffsfs_SetError(-ENOENT);
+ errorReported = 1;
+ }
+
+ if (obj && !openDenied) {
+ int inodeId = yaffsfs_GetInodeIdForObject(obj);
+
+ if (inodeId < 0) {
+ /*
+ * Todo: Fix any problem if inodes run out,
+ * though that can't happen while the number of
+ * inode items >= the number of handles.
+ */
+ }
+
+ fd->inodeId = inodeId;
+ fd->reading = readRequested;
+ fd->writing = writeRequested;
+ fd->append = (oflag & O_APPEND) ? 1 : 0;
+ fd->position = 0;
+ fd->shareRead = shareRead;
+ fd->shareWrite = shareWrite;
+
+ /* Hook inode to object */
+ obj->my_inode = (void *)&yaffsfs_inode[inodeId];
+
+ if ((oflag & O_TRUNC) && fd->writing)
+ yaffs_resize_file(obj, 0);
+ } else {
+ yaffsfs_PutHandle(handle);
+ if (!errorReported)
+ yaffsfs_SetError(0); /* Problem */
+ handle = -1;
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return handle;
+}
+
+int yaffs_open(const YCHAR *path, int oflag, int mode)
+{
+ return yaffs_open_sharing(path, oflag, mode,
+ YAFFS_SHARE_READ | YAFFS_SHARE_WRITE);
+}
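+
+/*
+ * Sharing sketch (hypothetical path, device assumed mounted read/write):
+ * an opener can deny write sharing by omitting YAFFS_SHARE_WRITE, whereas
+ * yaffs_open() always grants both kinds of sharing.
+ *
+ *   int h = yaffs_open_sharing("/flash/cfg", O_RDWR | O_CREAT, 0666,
+ *                              YAFFS_SHARE_READ);
+ *
+ * While h is open, a subsequent yaffs_open("/flash/cfg", O_WRONLY, 0)
+ * returns -1 with the error set to -EBUSY, because write access was
+ * requested but write sharing was not granted by the first opener.
+ */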
+
+int yaffs_Dofsync(int handle, int datasync)
+{
+ int retVal = -1;
+ struct yaffs_obj *obj;
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_HandleToObject(handle);
+
+ if (!obj)
+ yaffsfs_SetError(-EBADF);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else {
+ yaffs_flush_file(obj, 1, datasync);
+ retVal = 0;
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+int yaffs_fsync(int handle)
+{
+ return yaffs_Dofsync(handle, 0);
+}
+
+int yaffs_flush(int handle)
+{
+ return yaffs_fsync(handle);
+}
+
+int yaffs_fdatasync(int handle)
+{
+ return yaffs_Dofsync(handle, 1);
+}
+
+int yaffs_close(int handle)
+{
+ struct yaffsfs_Handle *h = NULL;
+ struct yaffs_obj *obj = NULL;
+ int retVal = -1;
+
+ yaffsfs_Lock();
+
+ h = yaffsfs_HandleToPointer(handle);
+ obj = yaffsfs_HandleToObject(handle);
+
+ if (!h || !obj)
+ yaffsfs_SetError(-EBADF);
+ else {
+ /* clean up */
+ yaffs_flush_file(obj, 1, 0);
+ yaffsfs_PutHandle(handle);
+ retVal = 0;
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+int yaffsfs_do_read(int handle, void *vbuf, unsigned int nbyte,
+ int isPread, loff_t offset)
+{
+ struct yaffsfs_FileDes *fd = NULL;
+ struct yaffs_obj *obj = NULL;
+ loff_t pos = 0;
+ loff_t startPos = 0;
+ loff_t endPos = 0;
+ int nRead = 0;
+ int nToRead = 0;
+ int totalRead = 0;
+ loff_t maxRead;
+ u8 *buf = (u8 *) vbuf;
+
+ if (!vbuf) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ fd = yaffsfs_HandleToFileDes(handle);
+ obj = yaffsfs_HandleToObject(handle);
+
+ if (!fd || !obj) {
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+ totalRead = -1;
+ } else if (!fd->reading) {
+ /* Not a reading handle */
+ yaffsfs_SetError(-EINVAL);
+ totalRead = -1;
+ } else if (nbyte > YAFFS_MAX_FILE_SIZE) {
+ yaffsfs_SetError(-EINVAL);
+ totalRead = -1;
+ } else {
+ if (isPread)
+ startPos = offset;
+ else
+ startPos = fd->position;
+
+ pos = startPos;
+
+ if (yaffs_get_obj_length(obj) > pos)
+ maxRead = yaffs_get_obj_length(obj) - pos;
+ else
+ maxRead = 0;
+
+ if (nbyte > maxRead)
+ nbyte = maxRead;
+
+ yaffsfs_GetHandle(handle);
+
+ endPos = pos + nbyte;
+
+ if (pos < 0 || pos > YAFFS_MAX_FILE_SIZE ||
+ nbyte > YAFFS_MAX_FILE_SIZE ||
+ endPos < 0 || endPos > YAFFS_MAX_FILE_SIZE) {
+ totalRead = -1;
+ nbyte = 0;
+ }
+
+ while (nbyte > 0) {
+ nToRead = YAFFSFS_RW_SIZE -
+ (pos & (YAFFSFS_RW_SIZE - 1));
+ if (nToRead > nbyte)
+ nToRead = nbyte;
+
+ /* Tricky bit...
+ * Need to reverify object in case the device was
+ * unmounted in another thread.
+ */
+ obj = yaffsfs_HandleToObject(handle);
+ if (!obj)
+ nRead = 0;
+ else
+ nRead = yaffs_file_rd(obj, buf, pos, nToRead);
+
+ if (nRead > 0) {
+ totalRead += nRead;
+ pos += nRead;
+ buf += nRead;
+ }
+
+ if (nRead == nToRead)
+ nbyte -= nRead;
+ else
+ nbyte = 0; /* no more to read */
+
+ if (nbyte > 0) {
+ yaffsfs_Unlock();
+ yaffsfs_Lock();
+ }
+
+ }
+
+ yaffsfs_PutHandle(handle);
+
+ if (!isPread) {
+ if (totalRead >= 0)
+ fd->position = startPos + totalRead;
+ else
+ yaffsfs_SetError(-EINVAL);
+ }
+
+ }
+
+ yaffsfs_Unlock();
+
+ return (totalRead >= 0) ? totalRead : -1;
+
+}
+
+int yaffs_read(int handle, void *buf, unsigned int nbyte)
+{
+ return yaffsfs_do_read(handle, buf, nbyte, 0, 0);
+}
+
+int yaffs_pread(int handle, void *buf, unsigned int nbyte, loff_t offset)
+{
+ return yaffsfs_do_read(handle, buf, nbyte, 1, offset);
+}
+
+int yaffsfs_do_write(int handle, const void *vbuf, unsigned int nbyte,
+ int isPwrite, loff_t offset)
+{
+ struct yaffsfs_FileDes *fd = NULL;
+ struct yaffs_obj *obj = NULL;
+ loff_t pos = 0;
+ loff_t startPos = 0;
+ loff_t endPos;
+ int nWritten = 0;
+ int totalWritten = 0;
+ int write_through = 0;
+ int nToWrite = 0;
+ const u8 *buf = (const u8 *)vbuf;
+
+ if (!vbuf) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ fd = yaffsfs_HandleToFileDes(handle);
+ obj = yaffsfs_HandleToObject(handle);
+
+ if (!fd || !obj) {
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+ totalWritten = -1;
+ } else if (!fd->writing) {
+ yaffsfs_SetError(-EINVAL);
+ totalWritten = -1;
+ } else if (obj->my_dev->read_only) {
+ yaffsfs_SetError(-EROFS);
+ totalWritten = -1;
+ } else {
+ if (fd->append)
+ startPos = yaffs_get_obj_length(obj);
+ else if (isPwrite)
+ startPos = offset;
+ else
+ startPos = fd->position;
+
+ yaffsfs_GetHandle(handle);
+ pos = startPos;
+ endPos = pos + nbyte;
+
+ if (pos < 0 || pos > YAFFS_MAX_FILE_SIZE ||
+ nbyte > YAFFS_MAX_FILE_SIZE ||
+ endPos < 0 || endPos > YAFFS_MAX_FILE_SIZE) {
+ totalWritten = -1;
+ nbyte = 0;
+ }
+
+ while (nbyte > 0) {
+
+ nToWrite = YAFFSFS_RW_SIZE -
+ (pos & (YAFFSFS_RW_SIZE - 1));
+ if (nToWrite > nbyte)
+ nToWrite = nbyte;
+
+ /* Tricky bit...
+ * Need to reverify object in case the device was
+ * remounted or unmounted in another thread.
+ */
+ obj = yaffsfs_HandleToObject(handle);
+ if (!obj || obj->my_dev->read_only)
+ nWritten = 0;
+ else
+ nWritten =
+ yaffs_wr_file(obj, buf, pos, nToWrite,
+ write_through);
+ if (nWritten > 0) {
+ totalWritten += nWritten;
+ pos += nWritten;
+ buf += nWritten;
+ }
+
+ if (nWritten == nToWrite)
+ nbyte -= nToWrite;
+ else
+ nbyte = 0;
+
+ if (nWritten < 1 && totalWritten < 1) {
+ yaffsfs_SetError(-ENOSPC);
+ totalWritten = -1;
+ }
+
+ if (nbyte > 0) {
+ yaffsfs_Unlock();
+ yaffsfs_Lock();
+ }
+ }
+
+ yaffsfs_PutHandle(handle);
+
+ if (!isPwrite) {
+ if (totalWritten > 0)
+ fd->position = startPos + totalWritten;
+ else
+ yaffsfs_SetError(-EINVAL);
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return (totalWritten >= 0) ? totalWritten : -1;
+}
+
+int yaffs_write(int fd, const void *buf, unsigned int nbyte)
+{
+ return yaffsfs_do_write(fd, buf, nbyte, 0, 0);
+}
+
+int yaffs_pwrite(int fd, const void *buf, unsigned int nbyte, loff_t offset)
+{
+ return yaffsfs_do_write(fd, buf, nbyte, 1, offset);
+}
+
+int yaffs_truncate(const YCHAR *path, loff_t new_size)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int result = YAFFS_FAIL;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else if (obj->variant_type != YAFFS_OBJECT_TYPE_FILE)
+ yaffsfs_SetError(-EISDIR);
+ else if (new_size < 0 || new_size > YAFFS_MAX_FILE_SIZE)
+ yaffsfs_SetError(-EINVAL);
+ else
+ result = yaffs_resize_file(obj, new_size);
+
+ yaffsfs_Unlock();
+
+ return (result) ? 0 : -1;
+}
+
+int yaffs_ftruncate(int handle, loff_t new_size)
+{
+ struct yaffsfs_FileDes *fd = NULL;
+ struct yaffs_obj *obj = NULL;
+ int result = 0;
+
+ yaffsfs_Lock();
+ fd = yaffsfs_HandleToFileDes(handle);
+ obj = yaffsfs_HandleToObject(handle);
+
+ if (!fd || !obj)
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+ else if (!fd->writing)
+ yaffsfs_SetError(-EINVAL);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else if (new_size < 0 || new_size > YAFFS_MAX_FILE_SIZE)
+ yaffsfs_SetError(-EINVAL);
+ else
+ /* resize the file */
+ result = yaffs_resize_file(obj, new_size);
+ yaffsfs_Unlock();
+
+ return (result) ? 0 : -1;
+
+}
+
+loff_t yaffs_lseek(int handle, loff_t offset, int whence)
+{
+ struct yaffsfs_FileDes *fd = NULL;
+ struct yaffs_obj *obj = NULL;
+ loff_t pos = -1;
+ loff_t fSize = -1;
+
+ yaffsfs_Lock();
+ fd = yaffsfs_HandleToFileDes(handle);
+ obj = yaffsfs_HandleToObject(handle);
+
+ if (!fd || !obj)
+ yaffsfs_SetError(-EBADF);
+ else if (offset > YAFFS_MAX_FILE_SIZE)
+ yaffsfs_SetError(-EINVAL);
+ else {
+ if (whence == SEEK_SET) {
+ if (offset >= 0)
+ pos = offset;
+ } else if (whence == SEEK_CUR) {
+ if ((fd->position + offset) >= 0)
+ pos = (fd->position + offset);
+ } else if (whence == SEEK_END) {
+ fSize = yaffs_get_obj_length(obj);
+ if (fSize >= 0 && (fSize + offset) >= 0)
+ pos = fSize + offset;
+ }
+
+ if (pos >= 0 && pos <= YAFFS_MAX_FILE_SIZE)
+ fd->position = pos;
+ else {
+ yaffsfs_SetError(-EINVAL);
+ pos = -1;
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return pos;
+}
+
+int yaffsfs_DoUnlink(const YCHAR *path, int isDirectory)
+{
+ struct yaffs_obj *dir = NULL;
+ struct yaffs_obj *obj = NULL;
+ YCHAR *name;
+ int result = YAFFS_FAIL;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 0, NULL, NULL, NULL);
+ dir = yaffsfs_FindDirectory(NULL, path, &name, 0, &notDir, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir)
+ yaffsfs_SetError(-ENOENT);
+ else if (yaffs_strncmp(name, _Y("."), 2) == 0)
+ yaffsfs_SetError(-EINVAL);
+ else if (!obj)
+ yaffsfs_SetError(-ENOENT);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else if (!isDirectory &&
+ obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffsfs_SetError(-EISDIR);
+ else if (isDirectory &&
+ obj->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (isDirectory && obj == obj->my_dev->root_dir)
+ yaffsfs_SetError(-EBUSY); /* Can't rmdir a root */
+ else {
+ result = yaffs_unlinker(dir, name);
+
+ if (result == YAFFS_FAIL && isDirectory)
+ yaffsfs_SetError(-ENOTEMPTY);
+ }
+
+ yaffsfs_Unlock();
+
+ return (result == YAFFS_FAIL) ? -1 : 0;
+}
+
+int yaffs_unlink(const YCHAR *path)
+{
+ return yaffsfs_DoUnlink(path, 0);
+}
+
+int yaffs_rename(const YCHAR *oldPath, const YCHAR *newPath)
+{
+ struct yaffs_obj *olddir = NULL;
+ struct yaffs_obj *newdir = NULL;
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *newobj = NULL;
+ YCHAR *oldname;
+ YCHAR *newname;
+ int result = YAFFS_FAIL;
+ int rename_allowed = 1;
+ int notOldDir = 0;
+ int notNewDir = 0;
+ int oldLoop = 0;
+ int newLoop = 0;
+
+ YCHAR *alt_newpath = NULL;
+
+ if (!oldPath || !newPath) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(oldPath) < 0 || yaffsfs_CheckPath(newPath) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ if (yaffsfs_alt_dir_path(newPath, &alt_newpath) < 0) {
+ yaffsfs_SetError(-ENOMEM);
+ return -1;
+ }
+ if (alt_newpath)
+ newPath = alt_newpath;
+
+ yaffsfs_Lock();
+
+ olddir = yaffsfs_FindDirectory(NULL, oldPath, &oldname, 0,
+ &notOldDir, &oldLoop);
+ newdir = yaffsfs_FindDirectory(NULL, newPath, &newname, 0,
+ &notNewDir, &newLoop);
+ obj = yaffsfs_FindObject(NULL, oldPath, 0, 0, NULL, NULL, NULL);
+ newobj = yaffsfs_FindObject(NULL, newPath, 0, 0, NULL, NULL, NULL);
+
+ /* If the object being renamed is a directory and the
+ * path ended with a "/", then olddir == obj.
+ * We pass through NULL for the old name to tell the lower layers
+ * to use olddir as the object.
+ */
+
+ if (olddir == obj)
+ oldname = NULL;
+
+ if ((!olddir && notOldDir) || (!newdir && notNewDir)) {
+ yaffsfs_SetError(-ENOTDIR);
+ rename_allowed = 0;
+ } else if (oldLoop || newLoop) {
+ yaffsfs_SetError(-ELOOP);
+ rename_allowed = 0;
+ } else if (olddir && oldname &&
+ yaffs_strncmp(oldname, _Y("."), 2) == 0) {
+ yaffsfs_SetError(-EINVAL);
+ rename_allowed = 0;
+ } else if (!olddir || !newdir || !obj) {
+ yaffsfs_SetError(-ENOENT);
+ rename_allowed = 0;
+ } else if (obj->my_dev->read_only) {
+ yaffsfs_SetError(-EROFS);
+ rename_allowed = 0;
+ } else if (yaffs_is_non_empty_dir(newobj)) {
+ yaffsfs_SetError(-ENOTEMPTY);
+ rename_allowed = 0;
+ } else if (olddir->my_dev != newdir->my_dev) {
+ /* Rename must be on same device */
+ yaffsfs_SetError(-EXDEV);
+ rename_allowed = 0;
+ } else if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+ /*
+ * It is a directory; check that it is not being renamed to
+ * become its own descendant.
+ * Do this by tracing from the new directory back to the root,
+ * checking for obj
+ */
+
+ struct yaffs_obj *xx = newdir;
+
+ while (rename_allowed && xx) {
+ if (xx == obj)
+ rename_allowed = 0;
+ xx = xx->parent;
+ }
+ if (!rename_allowed)
+ yaffsfs_SetError(-EINVAL);
+ }
+
+ if (rename_allowed)
+ result = yaffs_rename_obj(olddir, oldname, newdir, newname);
+
+ yaffsfs_Unlock();
+
+ kfree(alt_newpath);
+
+ return (result == YAFFS_FAIL) ? -1 : 0;
+}
+
+static int yaffsfs_DoStat(struct yaffs_obj *obj, struct yaffs_stat *buf)
+{
+ int retVal = -1;
+
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj && buf) {
+ buf->st_dev = (int)obj->my_dev->os_context;
+ buf->st_ino = obj->obj_id;
+ buf->st_mode = obj->yst_mode & ~S_IFMT;
+
+ if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY)
+ buf->st_mode |= S_IFDIR;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK)
+ buf->st_mode |= S_IFLNK;
+ else if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE)
+ buf->st_mode |= S_IFREG;
+
+ buf->st_nlink = yaffs_get_obj_link_count(obj);
+ buf->st_uid = 0;
+ buf->st_gid = 0;
+ buf->st_rdev = obj->yst_rdev;
+ buf->st_size = yaffs_get_obj_length(obj);
+ buf->st_blksize = obj->my_dev->data_bytes_per_chunk;
+ buf->st_blocks = lldiv(buf->st_size + buf->st_blksize - 1,
+ buf->st_blksize);
+#ifdef CONFIG_YAFFS_WINCE
+ buf->yst_wince_atime[0] = obj->win_atime[0];
+ buf->yst_wince_atime[1] = obj->win_atime[1];
+ buf->yst_wince_ctime[0] = obj->win_ctime[0];
+ buf->yst_wince_ctime[1] = obj->win_ctime[1];
+ buf->yst_wince_mtime[0] = obj->win_mtime[0];
+ buf->yst_wince_mtime[1] = obj->win_mtime[1];
+#else
+ buf->yst_atime = obj->yst_atime;
+ buf->yst_ctime = obj->yst_ctime;
+ buf->yst_mtime = obj->yst_mtime;
+#endif
+ retVal = 0;
+ }
+ return retVal;
+}
+
+static int yaffsfs_DoStatOrLStat(const YCHAR *path,
+ struct yaffs_stat *buf, int doLStat)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path || !buf) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (!doLStat && obj)
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else
+ retVal = yaffsfs_DoStat(obj, buf);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_stat(const YCHAR *path, struct yaffs_stat *buf)
+{
+ return yaffsfs_DoStatOrLStat(path, buf, 0);
+}
+
+int yaffs_lstat(const YCHAR *path, struct yaffs_stat *buf)
+{
+ return yaffsfs_DoStatOrLStat(path, buf, 1);
+}
+
+int yaffs_fstat(int fd, struct yaffs_stat *buf)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ if (!buf) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj)
+ retVal = yaffsfs_DoStat(obj, buf);
+ else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
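+
+/*
+ * Stat sketch (hypothetical path): st_blocks is reported in units of
+ * st_blksize (the device's data_bytes_per_chunk), not 512-byte sectors:
+ *
+ *   struct yaffs_stat st;
+ *
+ *   if (yaffs_stat("/flash/boot.cfg", &st) == 0)
+ *           printf("%lld bytes in %lld chunks of %d\n",
+ *                  (long long)st.st_size, (long long)st.st_blocks,
+ *                  (int)st.st_blksize);
+ */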
+
+static int yaffsfs_DoUtime(struct yaffs_obj *obj,
+ const struct yaffs_utimbuf *buf)
+{
+ int retVal = -1;
+ int result;
+
+ struct yaffs_utimbuf local;
+
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj && obj->my_dev->read_only) {
+ yaffsfs_SetError(-EROFS);
+ return -1;
+ }
+
+ if (!buf) {
+ local.actime = Y_CURRENT_TIME;
+ local.modtime = local.actime;
+ buf = &local;
+ }
+
+ if (obj) {
+ obj->yst_atime = buf->actime;
+ obj->yst_mtime = buf->modtime;
+ obj->dirty = 1;
+ result = yaffs_flush_file(obj, 0, 0);
+ retVal = result == YAFFS_OK ? 0 : -1;
+ }
+
+ return retVal;
+}
+
+int yaffs_utime(const YCHAR *path, const struct yaffs_utimbuf *buf)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else
+ retVal = yaffsfs_DoUtime(obj, buf);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_futime(int fd, const struct yaffs_utimbuf *buf)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj)
+ retVal = yaffsfs_DoUtime(obj, buf);
+ else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+#ifndef CONFIG_YAFFS_WINCE
+/* xattrib functions */
+
+static int yaffs_do_setxattr(const YCHAR *path, const char *name,
+ const void *data, int size, int flags, int follow)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *dir;
+ int notDir = 0;
+ int loop = 0;
+
+ int retVal = -1;
+
+ if (!path || !name || !data) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (follow)
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else {
+ retVal = yaffs_set_xattrib(obj, name, data, size, flags);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_setxattr(const YCHAR *path, const char *name,
+ const void *data, int size, int flags)
+{
+ return yaffs_do_setxattr(path, name, data, size, flags, 1);
+}
+
+int yaffs_lsetxattr(const YCHAR *path, const char *name,
+ const void *data, int size, int flags)
+{
+ return yaffs_do_setxattr(path, name, data, size, flags, 0);
+}
+
+int yaffs_fsetxattr(int fd, const char *name,
+ const void *data, int size, int flags)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ if (!name || !data) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (!obj)
+ yaffsfs_SetError(-EBADF);
+ else {
+ retVal = yaffs_set_xattrib(obj, name, data, size, flags);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+static int yaffs_do_getxattr(const YCHAR *path, const char *name,
+ void *data, int size, int follow)
+{
+ struct yaffs_obj *obj;
+ struct yaffs_obj *dir;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path || !name || !data) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (follow)
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else {
+ retVal = yaffs_get_xattrib(obj, name, data, size);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ }
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_getxattr(const YCHAR *path, const char *name, void *data, int size)
+{
+ return yaffs_do_getxattr(path, name, data, size, 1);
+}
+
+int yaffs_lgetxattr(const YCHAR *path, const char *name, void *data, int size)
+{
+ return yaffs_do_getxattr(path, name, data, size, 0);
+}
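+
+/*
+ * Xattr round-trip sketch (the path, attribute name and flags value 0 are
+ * assumptions for illustration):
+ *
+ *   char val[16];
+ *   int n;
+ *
+ *   yaffs_setxattr("/flash/boot.cfg", "user.rev", "42", 2, 0);
+ *   n = yaffs_getxattr("/flash/boot.cfg", "user.rev", val, sizeof(val));
+ *
+ * On success n is the number of bytes stored in val (2 here); on failure
+ * the call returns -1 with the yaffs error code passed to yaffsfs_SetError().
+ */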
+
+int yaffs_fgetxattr(int fd, const char *name, void *data, int size)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ if (!name || !data) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj) {
+ retVal = yaffs_get_xattrib(obj, name, data, size);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ } else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+static int yaffs_do_listxattr(const YCHAR *path, char *data,
+ int size, int follow)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path || !data) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (follow)
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else {
+ retVal = yaffs_list_xattrib(obj, data, size);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_listxattr(const YCHAR *path, char *data, int size)
+{
+ return yaffs_do_listxattr(path, data, size, 1);
+}
+
+int yaffs_llistxattr(const YCHAR *path, char *data, int size)
+{
+ return yaffs_do_listxattr(path, data, size, 0);
+}
+
+int yaffs_flistxattr(int fd, char *data, int size)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ if (!data) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj) {
+ retVal = yaffs_list_xattrib(obj, data, size);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ } else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+static int yaffs_do_removexattr(const YCHAR *path, const char *name,
+ int follow)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int notDir = 0;
+ int loop = 0;
+ int retVal = -1;
+
+ if (!path || !name) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (follow)
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else {
+ retVal = yaffs_remove_xattrib(obj, name);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_removexattr(const YCHAR *path, const char *name)
+{
+ return yaffs_do_removexattr(path, name, 1);
+}
+
+int yaffs_lremovexattr(const YCHAR *path, const char *name)
+{
+ return yaffs_do_removexattr(path, name, 0);
+}
+
+int yaffs_fremovexattr(int fd, const char *name)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ if (!name) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj) {
+ retVal = yaffs_remove_xattrib(obj, name);
+ if (retVal < 0) {
+ yaffsfs_SetError(retVal);
+ retVal = -1;
+ }
+ } else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+#endif
+
+#ifdef CONFIG_YAFFS_WINCE
+int yaffs_get_wince_times(int fd, unsigned *wctime,
+ unsigned *watime, unsigned *wmtime)
+{
+ struct yaffs_obj *obj;
+
+ int retVal = -1;
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj) {
+
+ if (wctime) {
+ wctime[0] = obj->win_ctime[0];
+ wctime[1] = obj->win_ctime[1];
+ }
+ if (watime) {
+ watime[0] = obj->win_atime[0];
+ watime[1] = obj->win_atime[1];
+ }
+ if (wmtime) {
+ wmtime[0] = obj->win_mtime[0];
+ wmtime[1] = obj->win_mtime[1];
+ }
+
+ retVal = 0;
+ } else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+int yaffs_set_wince_times(int fd,
+ const unsigned *wctime,
+ const unsigned *watime, const unsigned *wmtime)
+{
+ struct yaffs_obj *obj;
+ int result;
+ int retVal = -1;
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (obj) {
+
+ if (wctime) {
+ obj->win_ctime[0] = wctime[0];
+ obj->win_ctime[1] = wctime[1];
+ }
+ if (watime) {
+ obj->win_atime[0] = watime[0];
+ obj->win_atime[1] = watime[1];
+ }
+ if (wmtime) {
+ obj->win_mtime[0] = wmtime[0];
+ obj->win_mtime[1] = wmtime[1];
+ }
+
+ obj->dirty = 1;
+ result = yaffs_flush_file(obj, 0, 0);
+ retVal = 0;
+ } else
+ /* bad handle */
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+#endif
+
+static int yaffsfs_DoChMod(struct yaffs_obj *obj, mode_t mode)
+{
+ int result = -1;
+
+ if (obj)
+ obj = yaffs_get_equivalent_obj(obj);
+
+ if (obj) {
+ obj->yst_mode = mode;
+ obj->dirty = 1;
+ result = yaffs_flush_file(obj, 0, 0);
+ }
+
+ return result == YAFFS_OK ? 0 : -1;
+}
+
+int yaffs_access(const YCHAR *path, int amode)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int notDir = 0;
+ int loop = 0;
+ int retval = -1;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ if (amode & ~(R_OK | W_OK | X_OK)) {
+ yaffsfs_SetError(-EINVAL);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else if ((amode & W_OK) && obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else {
+ int access_ok = 1;
+
+ if ((amode & R_OK) && !(obj->yst_mode & S_IREAD))
+ access_ok = 0;
+ if ((amode & W_OK) && !(obj->yst_mode & S_IWRITE))
+ access_ok = 0;
+ if ((amode & X_OK) && !(obj->yst_mode & S_IEXEC))
+ access_ok = 0;
+
+ if (!access_ok)
+ yaffsfs_SetError(-EACCES);
+ else
+ retval = 0;
+ }
+
+ yaffsfs_Unlock();
+
+ return retval;
+
+}
+
+int yaffs_chmod(const YCHAR *path, mode_t mode)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ if (mode & ~(0777)) {
+ yaffsfs_SetError(-EINVAL);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+ obj = yaffsfs_FollowLink(obj, 0, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else
+ retVal = yaffsfs_DoChMod(obj, mode);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_fchmod(int fd, mode_t mode)
+{
+ struct yaffs_obj *obj;
+ int retVal = -1;
+
+ if (mode & ~(0777)) {
+ yaffsfs_SetError(-EINVAL);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ obj = yaffsfs_HandleToObject(fd);
+
+ if (!obj)
+ yaffsfs_SetError(-EBADF);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else
+ retVal = yaffsfs_DoChMod(obj, mode);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+int yaffs_mkdir(const YCHAR *path, mode_t mode)
+{
+ struct yaffs_obj *parent = NULL;
+ struct yaffs_obj *dir = NULL;
+ YCHAR *name;
+ YCHAR *alt_path = NULL;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ if (yaffsfs_alt_dir_path(path, &alt_path) < 0) {
+ yaffsfs_SetError(-ENOMEM);
+ return -1;
+ }
+ if (alt_path)
+ path = alt_path;
+
+ yaffsfs_Lock();
+ parent = yaffsfs_FindDirectory(NULL, path, &name, 0, &notDir, &loop);
+ if (!parent && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!parent)
+ yaffsfs_SetError(-ENOENT);
+ else if (yaffsfs_TooManyObjects(parent->my_dev))
+ yaffsfs_SetError(-ENFILE);
+ else if (yaffs_strnlen(name, 5) == 0) {
+ /* Trying to make the root itself */
+ yaffsfs_SetError(-EEXIST);
+ } else if (parent->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else {
+ dir = yaffs_create_dir(parent, name, mode, 0, 0);
+ if (dir)
+ retVal = 0;
+ else if (yaffs_find_by_name(parent, name))
+ yaffsfs_SetError(-EEXIST); /* name exists */
+ else
+ yaffsfs_SetError(-ENOSPC); /* assume no space */
+ }
+
+ yaffsfs_Unlock();
+
+ kfree(alt_path);
+
+ return retVal;
+}
+
+int yaffs_rmdir(const YCHAR *path)
+{
+ int result;
+ YCHAR *alt_path;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ if (yaffsfs_alt_dir_path(path, &alt_path) < 0) {
+ yaffsfs_SetError(-ENOMEM);
+ return -1;
+ }
+ if (alt_path)
+ path = alt_path;
+ result = yaffsfs_DoUnlink(path, 1);
+
+ kfree(alt_path);
+
+ return result;
+}
+
+void *yaffs_getdev(const YCHAR *path)
+{
+ struct yaffs_dev *dev = NULL;
+ YCHAR *dummy;
+ dev = yaffsfs_FindDevice(path, &dummy);
+ return (void *)dev;
+}
+
+int yaffs_mount_common(const YCHAR *path, int read_only, int skip_checkpt)
+{
+ int retVal = -1;
+ int result = YAFFS_FAIL;
+ struct yaffs_dev *dev = NULL;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffs_trace(YAFFS_TRACE_MOUNT, "yaffs: Mounting %s", path);
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ yaffsfs_InitHandles();
+
+ dev = yaffsfs_FindMountPoint(path);
+ if (dev) {
+ if (!dev->is_mounted) {
+ dev->read_only = read_only ? 1 : 0;
+ if (skip_checkpt) {
+ u8 skip = dev->param.skip_checkpt_rd;
+ dev->param.skip_checkpt_rd = 1;
+ result = yaffs_guts_initialise(dev);
+ dev->param.skip_checkpt_rd = skip;
+ } else {
+ result = yaffs_guts_initialise(dev);
+ }
+
+ if (result == YAFFS_FAIL)
+ yaffsfs_SetError(-ENOMEM);
+ retVal = result ? 0 : -1;
+
+ } else
+ yaffsfs_SetError(-EBUSY);
+ } else
+ yaffsfs_SetError(-ENODEV);
+
+ yaffsfs_Unlock();
+ return retVal;
+
+}
+
+int yaffs_mount2(const YCHAR *path, int readonly)
+{
+ return yaffs_mount_common(path, readonly, 0);
+}
+
+int yaffs_mount(const YCHAR *path)
+{
+ return yaffs_mount_common(path, 0, 0);
+}
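+
+/*
+ * Typical lifecycle sketch (the device name "/flash" and the flash_dev
+ * instance are hypothetical; flash_dev is assumed to be fully configured
+ * elsewhere, including param.name and the NAND access callbacks):
+ *
+ *   yaffs_add_device(&flash_dev);
+ *   yaffs_mount("/flash");
+ *
+ *   int h = yaffs_open("/flash/boot.cfg", O_RDONLY, 0);
+ *   ... yaffs_read(h, ...) ...
+ *   yaffs_close(h);
+ *
+ *   yaffs_sync("/flash");        flush the cache and save a checkpoint
+ *   yaffs_unmount("/flash");
+ */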
+
+int yaffs_sync(const YCHAR *path)
+{
+ int retVal = -1;
+ struct yaffs_dev *dev = NULL;
+ YCHAR *dummy;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dev = yaffsfs_FindDevice(path, &dummy);
+ if (dev) {
+ if (!dev->is_mounted)
+ yaffsfs_SetError(-EINVAL);
+ else if (dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else {
+
+ yaffs_flush_whole_cache(dev);
+ yaffs_checkpoint_save(dev);
+ retVal = 0;
+
+ }
+ } else
+ yaffsfs_SetError(-ENODEV);
+
+ yaffsfs_Unlock();
+ return retVal;
+}
+
+static int yaffsfs_IsDevBusy(struct yaffs_dev *dev)
+{
+ int i;
+ struct yaffs_obj *obj;
+
+ for (i = 0; i < YAFFSFS_N_HANDLES; i++) {
+ obj = yaffsfs_HandleToObject(i);
+ if (obj && obj->my_dev == dev)
+ return 1;
+ }
+ return 0;
+}
+
+int yaffs_remount(const YCHAR *path, int force, int read_only)
+{
+ int retVal = -1;
+ struct yaffs_dev *dev = NULL;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dev = yaffsfs_FindMountPoint(path);
+ if (dev) {
+ if (dev->is_mounted) {
+ yaffs_flush_whole_cache(dev);
+
+ if (force || !yaffsfs_IsDevBusy(dev)) {
+ if (read_only)
+ yaffs_checkpoint_save(dev);
+ dev->read_only = read_only ? 1 : 0;
+ retVal = 0;
+ } else
+ yaffsfs_SetError(-EBUSY);
+
+ } else
+ yaffsfs_SetError(-EINVAL);
+
+ } else
+ yaffsfs_SetError(-ENODEV);
+
+ yaffsfs_Unlock();
+ return retVal;
+
+}
+
+int yaffs_unmount2(const YCHAR *path, int force)
+{
+ int retVal = -1;
+ struct yaffs_dev *dev = NULL;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dev = yaffsfs_FindMountPoint(path);
+ if (dev) {
+ if (dev->is_mounted) {
+ int inUse;
+ yaffs_flush_whole_cache(dev);
+ yaffs_checkpoint_save(dev);
+ inUse = yaffsfs_IsDevBusy(dev);
+ if (!inUse || force) {
+ if (inUse)
+ yaffsfs_BreakDeviceHandles(dev);
+ yaffs_deinitialise(dev);
+
+ retVal = 0;
+ } else
+ yaffsfs_SetError(-EBUSY);
+
+ } else
+ yaffsfs_SetError(-EINVAL);
+
+ } else
+ yaffsfs_SetError(-ENODEV);
+
+ yaffsfs_Unlock();
+ return retVal;
+
+}
+
+int yaffs_unmount(const YCHAR *path)
+{
+ return yaffs_unmount2(path, 0);
+}
+
+loff_t yaffs_freespace(const YCHAR *path)
+{
+ loff_t retVal = -1;
+ struct yaffs_dev *dev = NULL;
+ YCHAR *dummy;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dev = yaffsfs_FindDevice(path, &dummy);
+ if (dev && dev->is_mounted) {
+ retVal = yaffs_get_n_free_chunks(dev);
+ retVal *= dev->data_bytes_per_chunk;
+
+ } else
+ yaffsfs_SetError(-EINVAL);
+
+ yaffsfs_Unlock();
+ return retVal;
+}
+
+loff_t yaffs_totalspace(const YCHAR *path)
+{
+ loff_t retVal = -1;
+ struct yaffs_dev *dev = NULL;
+ YCHAR *dummy;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dev = yaffsfs_FindDevice(path, &dummy);
+ if (dev && dev->is_mounted) {
+ retVal = (dev->param.end_block - dev->param.start_block + 1) -
+ dev->param.n_reserved_blocks;
+ retVal *= dev->param.chunks_per_block;
+ retVal *= dev->data_bytes_per_chunk;
+
+ } else
+ yaffsfs_SetError(-EINVAL);
+
+ yaffsfs_Unlock();
+ return retVal;
+}
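+
+/*
+ * Worked example of the calculation above with a hypothetical geometry:
+ * start_block = 0, end_block = 1023, n_reserved_blocks = 5,
+ * chunks_per_block = 64 and data_bytes_per_chunk = 2048 gives
+ *
+ *   (1024 - 5) * 64 * 2048 = 133562368 bytes of total space.
+ */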
+
+int yaffs_inodecount(const YCHAR *path)
+{
+ loff_t retVal = -1;
+ struct yaffs_dev *dev = NULL;
+ YCHAR *dummy;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dev = yaffsfs_FindDevice(path, &dummy);
+ if (dev && dev->is_mounted) {
+ int n_obj = dev->n_obj;
+ if (n_obj > dev->n_hardlinks)
+ retVal = n_obj - dev->n_hardlinks;
+ }
+
+ if (retVal < 0)
+ yaffsfs_SetError(-EINVAL);
+
+ yaffsfs_Unlock();
+ return retVal;
+}
+
+void yaffs_add_device(struct yaffs_dev *dev)
+{
+ struct list_head *cfg;
+ /* First check that the device is not in the list. */
+
+ list_for_each(cfg, &yaffsfs_deviceList) {
+ if (dev == list_entry(cfg, struct yaffs_dev, dev_list))
+ return;
+ }
+
+ dev->is_mounted = 0;
+ dev->param.remove_obj_fn = yaffsfs_RemoveObjectCallback;
+
+ if (!dev->dev_list.next)
+ INIT_LIST_HEAD(&dev->dev_list);
+
+ list_add(&dev->dev_list, &yaffsfs_deviceList);
+}
+
+void yaffs_remove_device(struct yaffs_dev *dev)
+{
+ list_del_init(&dev->dev_list);
+}
+
+/* Functions to iterate through devices. NB Use with extreme care! */
+
+static struct list_head *dev_iterator;
+void yaffs_dev_rewind(void)
+{
+ dev_iterator = yaffsfs_deviceList.next;
+}
+
+struct yaffs_dev *yaffs_next_dev(void)
+{
+ struct yaffs_dev *retval;
+
+ if (!dev_iterator)
+ return NULL;
+ if (dev_iterator == &yaffsfs_deviceList)
+ return NULL;
+
+ retval = list_entry(dev_iterator, struct yaffs_dev, dev_list);
+ dev_iterator = dev_iterator->next;
+ return retval;
+}
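+
+/*
+ * Iteration sketch: rewind first, then walk until NULL.  As noted above,
+ * this is not safe against devices being added or removed mid-walk.
+ *
+ *   struct yaffs_dev *dev;
+ *
+ *   yaffs_dev_rewind();
+ *   while ((dev = yaffs_next_dev()) != NULL)
+ *           ... inspect dev ...
+ */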
+
+/* Directory search stuff. */
+
+static struct list_head search_contexts;
+
+static void yaffsfs_SetDirRewound(struct yaffsfs_DirSearchContxt *dsc)
+{
+ if (dsc &&
+ dsc->dirObj &&
+ dsc->dirObj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+
+ dsc->offset = 0;
+
+ if (list_empty(&dsc->dirObj->variant.dir_variant.children))
+ dsc->nextReturn = NULL;
+ else
+ dsc->nextReturn =
+ list_entry(dsc->dirObj->variant.dir_variant.
+ children.next, struct yaffs_obj,
+ siblings);
+ } else {
+ /* Hey someone isn't playing nice! */
+ }
+}
+
+static void yaffsfs_DirAdvance(struct yaffsfs_DirSearchContxt *dsc)
+{
+ if (dsc &&
+ dsc->dirObj &&
+ dsc->dirObj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) {
+
+ if (dsc->nextReturn == NULL ||
+ list_empty(&dsc->dirObj->variant.dir_variant.children))
+ dsc->nextReturn = NULL;
+ else {
+ struct list_head *next = dsc->nextReturn->siblings.next;
+
+ if (next == &dsc->dirObj->variant.dir_variant.children)
+ dsc->nextReturn = NULL; /* end of list */
+ else
+ dsc->nextReturn = list_entry(next,
+ struct yaffs_obj,
+ siblings);
+ }
+ } else {
+ /* Hey someone isn't playing nice! */
+ }
+}
+
+static void yaffsfs_RemoveObjectCallback(struct yaffs_obj *obj)
+{
+
+ struct list_head *i;
+ struct yaffsfs_DirSearchContxt *dsc;
+
+ /* if the search contexts are not initialised then skip */
+ if (!search_contexts.next)
+ return;
+
+ /* Iterate through the directory search contexts.
+ * If any are the one being removed, then advance the dsc to
+ * the next one to prevent a hanging ptr.
+ */
+ list_for_each(i, &search_contexts) {
+ if (i) {
+ dsc = list_entry(i, struct yaffsfs_DirSearchContxt,
+ others);
+ if (dsc->nextReturn == obj)
+ yaffsfs_DirAdvance(dsc);
+ }
+ }
+
+}
+
+yaffs_DIR *yaffs_opendir(const YCHAR *dirname)
+{
+ yaffs_DIR *dir = NULL;
+ struct yaffs_obj *obj = NULL;
+ struct yaffsfs_DirSearchContxt *dsc = NULL;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!dirname) {
+ yaffsfs_SetError(-EFAULT);
+ return NULL;
+ }
+
+ if (yaffsfs_CheckPath(dirname) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return NULL;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, dirname, 0, 1, NULL, &notDir, &loop);
+
+ if (!obj && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!obj)
+ yaffsfs_SetError(-ENOENT);
+ else if (obj->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY)
+ yaffsfs_SetError(-ENOTDIR);
+ else {
+ int i;
+
+ for (i = 0, dsc = NULL; i < YAFFSFS_N_DSC && !dsc; i++) {
+ if (!yaffsfs_dsc[i].inUse)
+ dsc = &yaffsfs_dsc[i];
+ }
+
+ dir = (yaffs_DIR *) dsc;
+
+ if (dsc) {
+ memset(dsc, 0, sizeof(struct yaffsfs_DirSearchContxt));
+ dsc->inUse = 1;
+ dsc->dirObj = obj;
+ yaffs_strncpy(dsc->name, dirname, NAME_MAX);
+ INIT_LIST_HEAD(&dsc->others);
+
+ if (!search_contexts.next)
+ INIT_LIST_HEAD(&search_contexts);
+
+ list_add(&dsc->others, &search_contexts);
+ yaffsfs_SetDirRewound(dsc);
+ }
+
+ }
+
+ yaffsfs_Unlock();
+
+ return dir;
+}
+
+struct yaffs_dirent *yaffs_readdir(yaffs_DIR * dirp)
+{
+ struct yaffsfs_DirSearchContxt *dsc;
+ struct yaffs_dirent *retVal = NULL;
+
+ dsc = (struct yaffsfs_DirSearchContxt *) dirp;
+ yaffsfs_Lock();
+
+ if (dsc && dsc->inUse) {
+ yaffsfs_SetError(0);
+ if (dsc->nextReturn) {
+ dsc->de.d_ino =
+ yaffs_get_equivalent_obj(dsc->nextReturn)->obj_id;
+ dsc->de.d_dont_use = (unsigned)dsc->nextReturn;
+ dsc->de.d_off = dsc->offset++;
+ yaffs_get_obj_name(dsc->nextReturn,
+ dsc->de.d_name, NAME_MAX);
+ if (yaffs_strnlen(dsc->de.d_name, NAME_MAX + 1) == 0) {
+ /* this should not happen! */
+ yaffs_strcpy(dsc->de.d_name, _Y("zz"));
+ }
+ dsc->de.d_reclen = sizeof(struct yaffs_dirent);
+ retVal = &dsc->de;
+ yaffsfs_DirAdvance(dsc);
+ } else
+ retVal = NULL;
+ } else
+ yaffsfs_SetError(-EBADF);
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+void yaffs_rewinddir(yaffs_DIR *dirp)
+{
+ struct yaffsfs_DirSearchContxt *dsc;
+
+ dsc = (struct yaffsfs_DirSearchContxt *) dirp;
+
+ yaffsfs_Lock();
+
+ yaffsfs_SetDirRewound(dsc);
+
+ yaffsfs_Unlock();
+}
+
+int yaffs_closedir(yaffs_DIR *dirp)
+{
+ struct yaffsfs_DirSearchContxt *dsc;
+
+ dsc = (struct yaffsfs_DirSearchContxt *) dirp;
+
+ if (!dsc) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ dsc->inUse = 0;
+ list_del(&dsc->others); /* unhook from list */
+ yaffsfs_Unlock();
+ return 0;
+}
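+
+/*
+ * Illustrative sketch, not part of the original source: the expected calling
+ * sequence for the directory functions above. The directory name is a
+ * placeholder and error handling is kept minimal.
+ */
+#if 0
+static void example_list_dir(const YCHAR *dirname)
+{
+ yaffs_DIR *d = yaffs_opendir(dirname);
+ struct yaffs_dirent *de;
+
+ if (!d)
+ return; /* yaffs_get_error() holds the reason */
+
+ while ((de = yaffs_readdir(d)) != NULL)
+ printf("%s (inode %ld)\n", de->d_name, de->d_ino);
+
+ yaffs_closedir(d);
+}
+#endif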
+
+/* End of directory stuff */
+
+int yaffs_symlink(const YCHAR *oldpath, const YCHAR *newpath)
+{
+ struct yaffs_obj *parent = NULL;
+ struct yaffs_obj *obj;
+ YCHAR *name;
+ int retVal = -1;
+ int mode = 0; /* ignore for now */
+ int notDir = 0;
+ int loop = 0;
+
+ if (!oldpath || !newpath) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(newpath) < 0 || yaffsfs_CheckPath(oldpath) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+ parent = yaffsfs_FindDirectory(NULL, newpath, &name, 0, &notDir, &loop);
+ if (!parent && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!parent || yaffs_strnlen(name, 5) < 1)
+ yaffsfs_SetError(-ENOENT);
+ else if (yaffsfs_TooManyObjects(parent->my_dev))
+ yaffsfs_SetError(-ENFILE);
+ else if (parent->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else if (parent) {
+ obj = yaffs_create_symlink(parent, name, mode, 0, 0, oldpath);
+ if (obj)
+ retVal = 0;
+ else if (yaffsfs_FindObject
+ (NULL, newpath, 0, 0, NULL, NULL, NULL))
+ yaffsfs_SetError(-EEXIST);
+ else
+ yaffsfs_SetError(-ENOSPC);
+ }
+
+ yaffsfs_Unlock();
+
+ return retVal;
+
+}
+
+int yaffs_readlink(const YCHAR *path, YCHAR *buf, int bufsiz)
+{
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *dir = NULL;
+ int retVal = -1;
+ int notDir = 0;
+ int loop = 0;
+
+ if (!path || !buf) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, &dir, &notDir, &loop);
+
+ if (!dir && notDir)
+ yaffsfs_SetError(-ENOTDIR);
+ else if (loop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else if (obj->variant_type != YAFFS_OBJECT_TYPE_SYMLINK)
+ yaffsfs_SetError(-EINVAL);
+ else {
+ YCHAR *alias = obj->variant.symlink_variant.alias;
+ memset(buf, 0, bufsiz);
+ yaffs_strncpy(buf, alias, bufsiz - 1);
+ retVal = 0;
+ }
+ yaffsfs_Unlock();
+ return retVal;
+}
+
+int yaffs_link(const YCHAR *oldpath, const YCHAR *linkpath)
+{
+ /* Creates a hard link called linkpath to the existing object at oldpath */
+ struct yaffs_obj *obj = NULL;
+ struct yaffs_obj *lnk = NULL;
+ struct yaffs_obj *obj_dir = NULL;
+ struct yaffs_obj *lnk_dir = NULL;
+ int retVal = -1;
+ int notDirObj = 0;
+ int notDirLnk = 0;
+ int objLoop = 0;
+ int lnkLoop = 0;
+ YCHAR *newname;
+
+ if (!oldpath || !linkpath) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(linkpath) < 0 || yaffsfs_CheckPath(oldpath) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ yaffsfs_Lock();
+
+ obj = yaffsfs_FindObject(NULL, oldpath, 0, 1,
+ &obj_dir, &notDirObj, &objLoop);
+ lnk = yaffsfs_FindObject(NULL, linkpath, 0, 0, NULL, NULL, NULL);
+ lnk_dir = yaffsfs_FindDirectory(NULL, linkpath, &newname,
+ 0, &notDirLnk, &lnkLoop);
+
+ if ((!obj_dir && notDirObj) || (!lnk_dir && notDirLnk))
+ yaffsfs_SetError(-ENOTDIR);
+ else if (objLoop || lnkLoop)
+ yaffsfs_SetError(-ELOOP);
+ else if (!obj_dir || !lnk_dir || !obj)
+ yaffsfs_SetError(-ENOENT);
+ else if (obj->my_dev->read_only)
+ yaffsfs_SetError(-EROFS);
+ else if (yaffsfs_TooManyObjects(obj->my_dev))
+ yaffsfs_SetError(-ENFILE);
+ else if (lnk)
+ yaffsfs_SetError(-EEXIST);
+ else if (lnk_dir->my_dev != obj->my_dev)
+ yaffsfs_SetError(-EXDEV);
+ else {
+ retVal = yaffsfs_CheckNameLength(newname);
+
+ if (retVal == 0) {
+ lnk = yaffs_link_obj(lnk_dir, newname, obj);
+ if (lnk)
+ retVal = 0;
+ else {
+ yaffsfs_SetError(-ENOSPC);
+ retVal = -1;
+ }
+ }
+ }
+ yaffsfs_Unlock();
+
+ return retVal;
+}
+
+int yaffs_mknod(const YCHAR *pathname, mode_t mode, dev_t dev)
+{
+ pathname = pathname;
+ mode = mode;
+ dev = dev;
+
+ yaffsfs_SetError(-EINVAL);
+ return -1;
+}
+
+/*
+ * D E B U G F U N C T I O N S
+ */
+
+/*
+ * yaffs_n_handles()
+ * Returns number of handles attached to the object
+ */
+int yaffs_n_handles(const YCHAR *path)
+{
+ struct yaffs_obj *obj;
+
+ if (!path) {
+ yaffsfs_SetError(-EFAULT);
+ return -1;
+ }
+
+ if (yaffsfs_CheckPath(path) < 0) {
+ yaffsfs_SetError(-ENAMETOOLONG);
+ return -1;
+ }
+
+ obj = yaffsfs_FindObject(NULL, path, 0, 1, NULL, NULL, NULL);
+
+ if (obj)
+ return yaffsfs_CountHandles(obj);
+ else
+ return -1;
+}
+
+int yaffs_get_error(void)
+{
+ return yaffsfs_GetLastError();
+}
+
+int yaffs_set_error(int error)
+{
+ yaffsfs_SetError(error);
+ return 0;
+}
+
+int yaffs_dump_dev(const YCHAR *path)
+{
+#if 1
+ path = path;
+#else
+ YCHAR *rest;
+
+ struct yaffs_obj *obj = yaffsfs_FindRoot(path, &rest);
+
+ if (obj) {
+ struct yaffs_dev *dev = obj->my_dev;
+
+ printf("\n"
+ "n_page_writes.......... %d\n"
+ "n_page_reads........... %d\n"
+ "n_erasures....... %d\n"
+ "n_gc_copies............ %d\n"
+ "garbageCollections... %d\n"
+ "passiveGarbageColl'ns %d\n"
+ "\n",
+ dev->n_page_writes,
+ dev->n_page_reads,
+ dev->n_erasures,
+ dev->n_gc_copies,
+ dev->garbageCollections, dev->passiveGarbageCollections);
+
+ }
+#endif
+ return 0;
+}
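+
+/*
+ * Illustrative sketch, not part of the original source: a minimal
+ * mount/read/unmount sequence using the public API declared in yaffsfs.h.
+ * The mount point and file name are placeholders.
+ */
+#if 0
+static int example_read_file(void)
+{
+ char buf[128];
+ int fd, n;
+
+ if (yaffs_mount("/nand") < 0)
+ return yaffs_get_error();
+
+ fd = yaffs_open("/nand/boot.txt", O_RDONLY, 0);
+ if (fd >= 0) {
+ n = yaffs_read(fd, buf, sizeof(buf));
+ if (n > 0)
+ printf("read %d bytes\n", n);
+ yaffs_close(fd);
+ }
+
+ yaffs_unmount("/nand");
+ return 0;
+}
+#endif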
diff --git a/qemu/roms/u-boot/fs/yaffs2/yaffsfs.h b/qemu/roms/u-boot/fs/yaffs2/yaffsfs.h
new file mode 100644
index 000000000..f2c766662
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yaffsfs.h
@@ -0,0 +1,209 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * Header file for using yaffs in an application via
+ * a direct interface.
+ */
+
+
+#ifndef __YAFFSFS_H__
+#define __YAFFSFS_H__
+
+#include "yaffscfg.h"
+#include "yportenv.h"
+
+
+#ifndef NAME_MAX
+#define NAME_MAX 256
+#endif
+
+#define YAFFS_MAX_FILE_SIZE (0x800000000LL - 1)
+
+
+struct yaffs_dirent {
+ long d_ino; /* inode number */
+ off_t d_off; /* offset to this dirent */
+ unsigned short d_reclen; /* length of this dirent */
+ YUCHAR d_type; /* type of this record */
+ YCHAR d_name[NAME_MAX+1]; /* file name (null-terminated) */
+ unsigned d_dont_use; /* debug: not for public consumption */
+};
+
+typedef struct opaque_structure yaffs_DIR;
+
+
+
+struct yaffs_stat {
+ int st_dev; /* device */
+ int st_ino; /* inode */
+ unsigned st_mode; /* protection */
+ int st_nlink; /* number of hard links */
+ int st_uid; /* user ID of owner */
+ int st_gid; /* group ID of owner */
+ unsigned st_rdev; /* device type (if inode device) */
+ loff_t st_size; /* total size, in bytes */
+ unsigned long st_blksize; /* blocksize for filesystem I/O */
+ unsigned long st_blocks; /* number of blocks allocated */
+#ifdef CONFIG_YAFFS_WINCE
+ /* Special 64-bit times for WinCE */
+ unsigned long yst_wince_atime[2];
+ unsigned long yst_wince_mtime[2];
+ unsigned long yst_wince_ctime[2];
+#else
+ unsigned long yst_atime; /* time of last access */
+ unsigned long yst_mtime; /* time of last modification */
+ unsigned long yst_ctime; /* time of last change */
+#endif
+};
+
+
+struct yaffs_utimbuf {
+ unsigned long actime;
+ unsigned long modtime;
+};
+
+
+int yaffs_open(const YCHAR *path, int oflag, int mode) ;
+
+int yaffs_close(int fd) ;
+int yaffs_fsync(int fd) ;
+int yaffs_fdatasync(int fd) ;
+int yaffs_flush(int fd) ; /* same as yaffs_fsync() */
+
+int yaffs_access(const YCHAR *path, int amode);
+
+int yaffs_dup(int fd);
+
+int yaffs_read(int fd, void *buf, unsigned int nbyte) ;
+int yaffs_write(int fd, const void *buf, unsigned int nbyte) ;
+
+int yaffs_pread(int fd, void *buf, unsigned int nbyte, loff_t offset);
+int yaffs_pwrite(int fd, const void *buf, unsigned int nbyte, loff_t offset);
+
+loff_t yaffs_lseek(int fd, loff_t offset, int whence) ;
+
+int yaffs_truncate(const YCHAR *path, loff_t new_size);
+int yaffs_ftruncate(int fd, loff_t new_size);
+
+int yaffs_unlink(const YCHAR *path) ;
+int yaffs_rename(const YCHAR *oldPath, const YCHAR *newPath) ;
+
+int yaffs_stat(const YCHAR *path, struct yaffs_stat *buf) ;
+int yaffs_lstat(const YCHAR *path, struct yaffs_stat *buf) ;
+int yaffs_fstat(int fd, struct yaffs_stat *buf) ;
+
+int yaffs_utime(const YCHAR *path, const struct yaffs_utimbuf *buf);
+int yaffs_futime(int fd, const struct yaffs_utimbuf *buf);
+
+
+int yaffs_setxattr(const char *path, const char *name,
+ const void *data, int size, int flags);
+int yaffs_lsetxattr(const char *path, const char *name,
+ const void *data, int size, int flags);
+int yaffs_fsetxattr(int fd, const char *name,
+ const void *data, int size, int flags);
+
+int yaffs_getxattr(const char *path, const char *name,
+ void *data, int size);
+int yaffs_lgetxattr(const char *path, const char *name,
+ void *data, int size);
+int yaffs_fgetxattr(int fd, const char *name,
+ void *data, int size);
+
+int yaffs_removexattr(const char *path, const char *name);
+int yaffs_lremovexattr(const char *path, const char *name);
+int yaffs_fremovexattr(int fd, const char *name);
+
+int yaffs_listxattr(const char *path, char *list, int size);
+int yaffs_llistxattr(const char *path, char *list, int size);
+int yaffs_flistxattr(int fd, char *list, int size);
+
+
+#ifdef CONFIG_YAFFS_WINCE
+
+int yaffs_set_wince_times(int fd,
+ const unsigned *wctime,
+ const unsigned *watime,
+ const unsigned *wmtime);
+int yaffs_get_wince_times(int fd,
+ unsigned *wctime,
+ unsigned *watime,
+ unsigned *wmtime);
+
+#endif
+
+int yaffs_chmod(const YCHAR *path, mode_t mode);
+int yaffs_fchmod(int fd, mode_t mode);
+
+int yaffs_mkdir(const YCHAR *path, mode_t mode) ;
+int yaffs_rmdir(const YCHAR *path) ;
+
+yaffs_DIR *yaffs_opendir(const YCHAR *dirname) ;
+struct yaffs_dirent *yaffs_readdir(yaffs_DIR *dirp) ;
+void yaffs_rewinddir(yaffs_DIR *dirp) ;
+int yaffs_closedir(yaffs_DIR *dirp) ;
+
+int yaffs_mount(const YCHAR *path) ;
+int yaffs_mount2(const YCHAR *path, int read_only);
+int yaffs_mount_common(const YCHAR *path, int read_only, int skip_checkpt);
+
+int yaffs_unmount(const YCHAR *path) ;
+int yaffs_unmount2(const YCHAR *path, int force);
+int yaffs_remount(const YCHAR *path, int force, int read_only);
+
+
+int yaffs_sync(const YCHAR *path) ;
+
+int yaffs_symlink(const YCHAR *oldpath, const YCHAR *newpath);
+int yaffs_readlink(const YCHAR *path, YCHAR *buf, int bufsiz);
+
+int yaffs_link(const YCHAR *oldpath, const YCHAR *newpath);
+int yaffs_mknod(const YCHAR *pathname, mode_t mode, dev_t dev);
+
+loff_t yaffs_freespace(const YCHAR *path);
+loff_t yaffs_totalspace(const YCHAR *path);
+
+int yaffs_inodecount(const YCHAR *path);
+
+int yaffs_n_handles(const YCHAR *path);
+
+#define YAFFS_SHARE_READ 1
+#define YAFFS_SHARE_WRITE 2
+int yaffs_open_sharing(const YCHAR *path, int oflag, int mode, int shareMode);
+
+struct yaffs_dev;
+void yaffs_add_device(struct yaffs_dev *dev);
+
+int yaffs_start_up(void);
+int yaffsfs_GetLastError(void);
+
+/* Functions to iterate through devices. NB Use with extreme care! */
+void yaffs_dev_rewind(void);
+struct yaffs_dev *yaffs_next_dev(void);
+
+/* Function to get the last error */
+int yaffs_get_error(void);
+const char *yaffs_error_to_str(int err);
+
+/* Function only for debugging */
+void *yaffs_getdev(const YCHAR *path);
+int yaffs_dump_dev(const YCHAR *path);
+int yaffs_set_error(int error);
+
+/* Trace control functions */
+unsigned yaffs_set_trace(unsigned tm);
+unsigned yaffs_get_trace(void);
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/ydirectenv.h b/qemu/roms/u-boot/fs/yaffs2/ydirectenv.h
new file mode 100644
index 000000000..c6614f13b
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/ydirectenv.h
@@ -0,0 +1,84 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+/*
+ * ydirectenv.h: Environment wrappers for YAFFS direct.
+ */
+
+#ifndef __YDIRECTENV_H__
+#define __YDIRECTENV_H__
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/compat.h>
+
+#include "yaffs_osglue.h"
+
+void yaffs_bug_fn(const char *file_name, int line_no);
+
+
+
+#define YCHAR char
+#define YUCHAR unsigned char
+#define _Y(x) x
+
+#define yaffs_strcat(a, b) strcat(a, b)
+#define yaffs_strcpy(a, b) strcpy(a, b)
+#define yaffs_strncpy(a, b, c) strncpy(a, b, c)
+#define yaffs_strnlen(s, m) strnlen(s, m)
+#ifdef CONFIG_YAFFS_CASE_INSENSITIVE
+#define yaffs_strcmp(a, b) strcasecmp(a, b)
+#define yaffs_strncmp(a, b, c) strncasecmp(a, b, c)
+#else
+#define yaffs_strcmp(a, b) strcmp(a, b)
+#define yaffs_strncmp(a, b, c) strncmp(a, b, c)
+#endif
+
+
+void yaffs_qsort(void *aa, size_t n, size_t es,
+ int (*cmp)(const void *, const void *));
+
+#define sort(base, n, sz, cmp_fn, swp) yaffs_qsort(base, n, sz, cmp_fn)
+
+#define YAFFS_PATH_DIVIDERS "/"
+
+#ifdef NO_inline
+#define inline
+#endif
+
+#define cond_resched() do {} while (0)
+
+#define yaffs_trace(msk, fmt, ...) do { \
+ if (yaffs_trace_mask & (msk)) \
+ printf("yaffs: " fmt "\n", ##__VA_ARGS__); \
+} while (0)
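+
+/*
+ * Example (illustrative only): a call such as
+ * yaffs_trace(YAFFS_TRACE_MOUNT, "mounting %s", path);
+ * prints "yaffs: mounting <path>" when yaffs_trace_mask has the
+ * YAFFS_TRACE_MOUNT bit set, and is silent otherwise. YAFFS_TRACE_MOUNT is
+ * assumed to be one of the trace bits defined in yaffs_trace.h.
+ */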
+
+
+#define YAFFS_LOSTNFOUND_NAME "lost+found"
+#define YAFFS_LOSTNFOUND_PREFIX "obj"
+
+#include "yaffscfg.h"
+
+#define Y_CURRENT_TIME yaffsfs_CurrentTime()
+#define Y_TIME_CONVERT(x) x
+
+#define YAFFS_ROOT_MODE 0666
+#define YAFFS_LOSTNFOUND_MODE 0666
+
+#include "linux/list.h"
+
+#include "yaffsfs.h"
+
+#endif
diff --git a/qemu/roms/u-boot/fs/yaffs2/yportenv.h b/qemu/roms/u-boot/fs/yaffs2/yportenv.h
new file mode 100644
index 000000000..251eba079
--- /dev/null
+++ b/qemu/roms/u-boot/fs/yaffs2/yportenv.h
@@ -0,0 +1,309 @@
+/*
+ * YAFFS: Yet another Flash File System . A NAND-flash specific file system.
+ *
+ * Copyright (C) 2002-2011 Aleph One Ltd.
+ * for Toby Churchill Ltd and Brightstar Engineering
+ *
+ * Created by Charles Manning <charles@aleph1.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ *
+ * Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
+ */
+
+
+#ifndef __YPORTENV_H__
+#define __YPORTENV_H__
+
+#include <linux/types.h>
+
+/* Definition of types */
+#ifdef CONFIG_YAFFS_DEFINES_TYPES
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned u32;
+#endif
+
+
+#ifdef CONFIG_YAFFS_PROVIDE_DEFS
+/* File types */
+
+
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+
+/*
+ * Attribute flags.
+ * These are or-ed together to select what has been changed.
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+
+struct iattr {
+ unsigned int ia_valid;
+ unsigned ia_mode;
+ unsigned ia_uid;
+ unsigned ia_gid;
+ unsigned ia_size;
+ unsigned ia_atime;
+ unsigned ia_mtime;
+ unsigned ia_ctime;
+ unsigned int ia_attr_flags;
+};
+
+#endif
+
+
+
+#if defined CONFIG_YAFFS_WINCE
+
+#include "ywinceenv.h"
+
+
+#elif defined CONFIG_YAFFS_DIRECT
+
+/* Direct interface */
+#include "ydirectenv.h"
+
+#elif defined CONFIG_YAFFS_UTIL
+
+#include "yutilsenv.h"
+
+#else
+/* Should have specified a configuration type */
+#error Unknown configuration
+
+#endif
+
+#if defined(CONFIG_YAFFS_DIRECT) || defined(CONFIG_YAFFS_WINCE)
+
+#ifdef CONFIG_YAFFSFS_PROVIDE_VALUES
+
+#ifndef O_RDONLY
+#define O_RDONLY 00
+#endif
+
+#ifndef O_WRONLY
+#define O_WRONLY 01
+#endif
+
+#ifndef O_RDWR
+#define O_RDWR 02
+#endif
+
+#ifndef O_CREAT
+#define O_CREAT 0100
+#endif
+
+#ifndef O_EXCL
+#define O_EXCL 0200
+#endif
+
+#ifndef O_TRUNC
+#define O_TRUNC 01000
+#endif
+
+#ifndef O_APPEND
+#define O_APPEND 02000
+#endif
+
+#ifndef SEEK_SET
+#define SEEK_SET 0
+#endif
+
+#ifndef SEEK_CUR
+#define SEEK_CUR 1
+#endif
+
+#ifndef SEEK_END
+#define SEEK_END 2
+#endif
+
+#ifndef EBUSY
+#define EBUSY 16
+#endif
+
+#ifndef ENODEV
+#define ENODEV 19
+#endif
+
+#ifndef EINVAL
+#define EINVAL 22
+#endif
+
+#ifndef ENFILE
+#define ENFILE 23
+#endif
+
+#ifndef EBADF
+#define EBADF 9
+#endif
+
+#ifndef EACCES
+#define EACCES 13
+#endif
+
+#ifndef EXDEV
+#define EXDEV 18
+#endif
+
+#ifndef ENOENT
+#define ENOENT 2
+#endif
+
+#ifndef ENOSPC
+#define ENOSPC 28
+#endif
+
+#ifndef EROFS
+#define EROFS 30
+#endif
+
+#ifndef ERANGE
+#define ERANGE 34
+#endif
+
+#ifndef ENODATA
+#define ENODATA 61
+#endif
+
+#ifndef ENOTEMPTY
+#define ENOTEMPTY 39
+#endif
+
+#ifndef ENAMETOOLONG
+#define ENAMETOOLONG 36
+#endif
+
+#ifndef ENOMEM
+#define ENOMEM 12
+#endif
+
+#ifndef EFAULT
+#define EFAULT 14
+#endif
+
+#ifndef EEXIST
+#define EEXIST 17
+#endif
+
+#ifndef ENOTDIR
+#define ENOTDIR 20
+#endif
+
+#ifndef EISDIR
+#define EISDIR 21
+#endif
+
+#ifndef ELOOP
+#define ELOOP 40
+#endif
+
+
+/* Mode flags */
+
+#ifndef S_IFMT
+#define S_IFMT 0170000
+#endif
+
+#ifndef S_IFSOCK
+#define S_IFSOCK 0140000
+#endif
+
+#ifndef S_IFIFO
+#define S_IFIFO 0010000
+#endif
+
+#ifndef S_IFCHR
+#define S_IFCHR 0020000
+#endif
+
+#ifndef S_IFBLK
+#define S_IFBLK 0060000
+#endif
+
+#ifndef S_IFLNK
+#define S_IFLNK 0120000
+#endif
+
+#ifndef S_IFDIR
+#define S_IFDIR 0040000
+#endif
+
+#ifndef S_IFREG
+#define S_IFREG 0100000
+#endif
+
+#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK)
+#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK)
+#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
+#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
+#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK)
+#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR)
+#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO)
+
+
+#ifndef S_IREAD
+#define S_IREAD 0000400
+#endif
+
+#ifndef S_IWRITE
+#define S_IWRITE 0000200
+#endif
+
+#ifndef S_IEXEC
+#define S_IEXEC 0000100
+#endif
+
+#ifndef XATTR_CREATE
+#define XATTR_CREATE 1
+#endif
+
+#ifndef XATTR_REPLACE
+#define XATTR_REPLACE 2
+#endif
+
+#ifndef R_OK
+#define R_OK 4
+#define W_OK 2
+#define X_OK 1
+#define F_OK 0
+#endif
+
+#else
+#include <errno.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#endif
+
+#endif
+
+#ifndef Y_DUMP_STACK
+#define Y_DUMP_STACK() do { } while (0)
+#endif
+
+#ifndef BUG
+#define BUG() do {\
+ yaffs_trace(YAFFS_TRACE_BUG,\
+ "==>> yaffs bug: " __FILE__ " %d",\
+ __LINE__);\
+ Y_DUMP_STACK();\
+} while (0)
+#endif
+
+#endif
diff --git a/qemu/roms/u-boot/fs/zfs/Makefile b/qemu/roms/u-boot/fs/zfs/Makefile
new file mode 100644
index 000000000..fa58b7fcd
--- /dev/null
+++ b/qemu/roms/u-boot/fs/zfs/Makefile
@@ -0,0 +1,8 @@
+#
+# (C) Copyright 2012
+# Jorgen Lundman <lundman at lundman.net>
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+obj-y := dev.o zfs.o zfs_fletcher.o zfs_sha256.o zfs_lzjb.o
diff --git a/qemu/roms/u-boot/fs/zfs/dev.c b/qemu/roms/u-boot/fs/zfs/dev.c
new file mode 100644
index 000000000..3a1fa5685
--- /dev/null
+++ b/qemu/roms/u-boot/fs/zfs/dev.c
@@ -0,0 +1,112 @@
+/*
+ *
+ * based on code of fs/reiserfs/dev.c by
+ *
+ * (C) Copyright 2003 - 2004
+ * Sysgo AG, <www.elinos.com>, Pavel Bartusek <pba@sysgo.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+
+#include <common.h>
+#include <config.h>
+#include <zfs_common.h>
+
+static block_dev_desc_t *zfs_block_dev_desc;
+static disk_partition_t *part_info;
+
+void zfs_set_blk_dev(block_dev_desc_t *rbdd, disk_partition_t *info)
+{
+ zfs_block_dev_desc = rbdd;
+ part_info = info;
+}
+
+/* Returns 0 on success, 1 on error. */
+int zfs_devread(int sector, int byte_offset, int byte_len, char *buf)
+{
+ short sec_buffer[SECTOR_SIZE/sizeof(short)];
+ char *sec_buf = (char *)sec_buffer;
+ unsigned block_len;
+
+ /*
+ * Check partition boundaries
+ */
+ if ((sector < 0) ||
+ ((sector + ((byte_offset + byte_len - 1) >> SECTOR_BITS)) >=
+ part_info->size)) {
+ /* errnum = ERR_OUTSIDE_PART; */
+ printf(" ** zfs_devread() read outside partition sector %d\n", sector);
+ return 1;
+ }
+
+ /*
+ * Fold whole sectors of byte_offset into the sector number,
+ * leaving only an in-sector offset.
+ */
+ sector += byte_offset >> SECTOR_BITS;
+ byte_offset &= SECTOR_SIZE - 1;
+
+ debug(" <%d, %d, %d>\n", sector, byte_offset, byte_len);
+
+ if (zfs_block_dev_desc == NULL) {
+ printf("** Invalid Block Device Descriptor (NULL)\n");
+ return 1;
+ }
+
+ if (byte_offset != 0) {
+ /* read the first, unaligned part up to the next sector boundary */
+ if (zfs_block_dev_desc->block_read(zfs_block_dev_desc->dev,
+ part_info->start + sector, 1,
+ (unsigned long *)sec_buf) != 1) {
+ printf(" ** zfs_devread() read error **\n");
+ return 1;
+ }
+ memcpy(buf, sec_buf + byte_offset,
+ min(SECTOR_SIZE - byte_offset, byte_len));
+ buf += min(SECTOR_SIZE - byte_offset, byte_len);
+ byte_len -= min(SECTOR_SIZE - byte_offset, byte_len);
+ sector++;
+ }
+
+ if (byte_len == 0)
+ return 0;
+
+ /* read sector aligned part */
+ block_len = byte_len & ~(SECTOR_SIZE - 1);
+
+ if (block_len == 0) {
+ u8 p[SECTOR_SIZE];
+
+ block_len = SECTOR_SIZE;
+ zfs_block_dev_desc->block_read(zfs_block_dev_desc->dev,
+ part_info->start + sector,
+ 1, (unsigned long *)p);
+ memcpy(buf, p, byte_len);
+ return 0;
+ }
+
+ if (zfs_block_dev_desc->block_read(zfs_block_dev_desc->dev,
+ part_info->start + sector, block_len / SECTOR_SIZE,
+ (unsigned long *) buf) != block_len / SECTOR_SIZE) {
+ printf(" ** zfs_devread() read error - block\n");
+ return 1;
+ }
+
+ block_len = byte_len & ~(SECTOR_SIZE - 1);
+ buf += block_len;
+ byte_len -= block_len;
+ sector += block_len / SECTOR_SIZE;
+
+ if (byte_len != 0) {
+ /* read the remaining data, which does not fill a whole sector */
+ if (zfs_block_dev_desc->
+ block_read(zfs_block_dev_desc->dev,
+ part_info->start + sector, 1,
+ (unsigned long *) sec_buf) != 1) {
+ printf(" ** zfs_devread() read error - last part\n");
+ return 1;
+ }
+ memcpy(buf, sec_buf, byte_len);
+ }
+ return 0;
+}
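+
+/*
+ * Worked example (illustrative only, assuming SECTOR_SIZE of 512 bytes and
+ * SECTOR_BITS of 9): zfs_devread(10, 100, 2000, buf) first folds the byte
+ * offset into the sector number (100 < 512, so sector stays 10), then
+ * - reads sector 10 and copies the unaligned head of 512 - 100 = 412 bytes,
+ * - reads the 1536-byte (three sector) aligned middle, sectors 11..13,
+ * straight into buf,
+ * - reads sector 14 and copies the remaining 52-byte tail.
+ */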
diff --git a/qemu/roms/u-boot/fs/zfs/zfs.c b/qemu/roms/u-boot/fs/zfs/zfs.c
new file mode 100644
index 000000000..099d51718
--- /dev/null
+++ b/qemu/roms/u-boot/fs/zfs/zfs.c
@@ -0,0 +1,2334 @@
+/*
+ *
+ * ZFS filesystem ported to u-boot by
+ * Jorgen Lundman <lundman at lundman.net>
+ *
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 1999,2000,2001,2002,2003,2004
+ * Free Software Foundation, Inc.
+ * Copyright 2004 Sun Microsystems, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/ctype.h>
+#include <asm/byteorder.h>
+#include "zfs_common.h"
+#include "div64.h"
+
+block_dev_desc_t *zfs_dev_desc;
+
+/*
+ * The zfs plug-in routines for GRUB are:
+ *
+ * zfs_mount() - locates a valid uberblock of the root pool and reads
+ * in its MOS at the memory address MOS.
+ *
+ * zfs_open() - locates a plain file object by following the MOS
+ * and places its dnode at the memory address DNODE.
+ *
+ * zfs_read() - read in the data blocks pointed by the DNODE.
+ *
+ */
+
+#include <zfs/zfs.h>
+#include <zfs/zio.h>
+#include <zfs/dnode.h>
+#include <zfs/uberblock_impl.h>
+#include <zfs/vdev_impl.h>
+#include <zfs/zio_checksum.h>
+#include <zfs/zap_impl.h>
+#include <zfs/zap_leaf.h>
+#include <zfs/zfs_znode.h>
+#include <zfs/dmu.h>
+#include <zfs/dmu_objset.h>
+#include <zfs/sa_impl.h>
+#include <zfs/dsl_dir.h>
+#include <zfs/dsl_dataset.h>
+
+
+#define ZPOOL_PROP_BOOTFS "bootfs"
+
+
+/*
+ * For nvlist manipulation. (from nvpair.h)
+ */
+#define NV_ENCODE_NATIVE 0
+#define NV_ENCODE_XDR 1
+#define NV_BIG_ENDIAN 0
+#define NV_LITTLE_ENDIAN 1
+#define DATA_TYPE_UINT64 8
+#define DATA_TYPE_STRING 9
+#define DATA_TYPE_NVLIST 19
+#define DATA_TYPE_NVLIST_ARRAY 20
+
+
+/*
+ * Macros to get fields in a bp or DVA.
+ */
+#define P2PHASE(x, align) ((x) & ((align) - 1))
+#define DVA_OFFSET_TO_PHYS_SECTOR(offset) \
+ ((offset + VDEV_LABEL_START_SIZE) >> SPA_MINBLOCKSHIFT)
+
+/*
+ * return x rounded down to an align boundary
+ * eg, P2ALIGN(1200, 1024) == 1024 (1*align)
+ * eg, P2ALIGN(1024, 1024) == 1024 (1*align)
+ * eg, P2ALIGN(0x1234, 0x100) == 0x1200 (0x12*align)
+ * eg, P2ALIGN(0x5600, 0x100) == 0x5600 (0x56*align)
+ */
+#define P2ALIGN(x, align) ((x) & -(align))
+
+/*
+ * FAT ZAP data structures
+ */
+#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
+#define ZAP_HASH_IDX(hash, n) (((n) == 0) ? 0 : ((hash) >> (64 - (n))))
+#define CHAIN_END 0xffff /* end of the chunk chain */
+
+/*
+ * The amount of space within the chunk available for the array is:
+ * chunk size - space for type (1) - space for next pointer (2)
+ */
+#define ZAP_LEAF_ARRAY_BYTES (ZAP_LEAF_CHUNKSIZE - 3)
+
+#define ZAP_LEAF_HASH_SHIFT(bs) (bs - 5)
+#define ZAP_LEAF_HASH_NUMENTRIES(bs) (1 << ZAP_LEAF_HASH_SHIFT(bs))
+#define LEAF_HASH(bs, h) \
+ ((ZAP_LEAF_HASH_NUMENTRIES(bs)-1) & \
+ ((h) >> (64 - ZAP_LEAF_HASH_SHIFT(bs)-l->l_hdr.lh_prefix_len)))
+
+/*
+ * The amount of space available for chunks is:
+ * block size shift - hash entry size (2) * number of hash
+ * entries - header space (2*chunksize)
+ */
+#define ZAP_LEAF_NUMCHUNKS(bs) \
+ (((1<<bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(bs)) / \
+ ZAP_LEAF_CHUNKSIZE - 2)
+
+/*
+ * The chunks start immediately after the hash table. The end of the
+ * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a
+ * chunk_t.
+ */
+#define ZAP_LEAF_CHUNK(l, bs, idx) \
+ ((zap_leaf_chunk_t *)(l->l_hash + ZAP_LEAF_HASH_NUMENTRIES(bs)))[idx]
+#define ZAP_LEAF_ENTRY(l, bs, idx) (&ZAP_LEAF_CHUNK(l, bs, idx).l_entry)
+
+
+/*
+ * Decompression Entry - lzjb
+ */
+#ifndef NBBY
+#define NBBY 8
+#endif
+
+
+
+typedef int zfs_decomp_func_t(void *s_start, void *d_start,
+ uint32_t s_len, uint32_t d_len);
+typedef struct decomp_entry {
+ char *name;
+ zfs_decomp_func_t *decomp_func;
+} decomp_entry_t;
+
+typedef struct dnode_end {
+ dnode_phys_t dn;
+ zfs_endian_t endian;
+} dnode_end_t;
+
+struct zfs_data {
+ /* cache for a file block of the currently zfs_open()-ed file */
+ char *file_buf;
+ uint64_t file_start;
+ uint64_t file_end;
+
+ /* XXX: ashift is per vdev, not per pool. We currently only ever touch
+ * a single vdev, but when/if raid-z or stripes are supported, this
+ * may need revision.
+ */
+ uint64_t vdev_ashift;
+ uint64_t label_txg;
+ uint64_t pool_guid;
+
+ /* cache for a dnode block */
+ dnode_phys_t *dnode_buf;
+ dnode_phys_t *dnode_mdn;
+ uint64_t dnode_start;
+ uint64_t dnode_end;
+ zfs_endian_t dnode_endian;
+
+ uberblock_t current_uberblock;
+
+ dnode_end_t mos;
+ dnode_end_t mdn;
+ dnode_end_t dnode;
+
+ uint64_t vdev_phys_sector;
+
+ int (*userhook)(const char *, const struct zfs_dirhook_info *);
+ struct zfs_dirhook_info *dirinfo;
+
+};
+
+
+
+
+static int
+zlib_decompress(void *s, void *d,
+ uint32_t slen, uint32_t dlen)
+{
+ if (zlib_decompress(s, d, slen, dlen) < 0)
+ return ZFS_ERR_BAD_FS;
+ return ZFS_ERR_NONE;
+}
+
+static decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] = {
+ {"inherit", NULL}, /* ZIO_COMPRESS_INHERIT */
+ {"on", lzjb_decompress}, /* ZIO_COMPRESS_ON */
+ {"off", NULL}, /* ZIO_COMPRESS_OFF */
+ {"lzjb", lzjb_decompress}, /* ZIO_COMPRESS_LZJB */
+ {"empty", NULL}, /* ZIO_COMPRESS_EMPTY */
+ {"gzip-1", zlib_decompress}, /* ZIO_COMPRESS_GZIP1 */
+ {"gzip-2", zlib_decompress}, /* ZIO_COMPRESS_GZIP2 */
+ {"gzip-3", zlib_decompress}, /* ZIO_COMPRESS_GZIP3 */
+ {"gzip-4", zlib_decompress}, /* ZIO_COMPRESS_GZIP4 */
+ {"gzip-5", zlib_decompress}, /* ZIO_COMPRESS_GZIP5 */
+ {"gzip-6", zlib_decompress}, /* ZIO_COMPRESS_GZIP6 */
+ {"gzip-7", zlib_decompress}, /* ZIO_COMPRESS_GZIP7 */
+ {"gzip-8", zlib_decompress}, /* ZIO_COMPRESS_GZIP8 */
+ {"gzip-9", zlib_decompress}, /* ZIO_COMPRESS_GZIP9 */
+};
+
+
+
+static int zio_read_data(blkptr_t *bp, zfs_endian_t endian,
+ void *buf, struct zfs_data *data);
+
+static int
+zio_read(blkptr_t *bp, zfs_endian_t endian, void **buf,
+ size_t *size, struct zfs_data *data);
+
+/*
+ * Our own version of log2(). Same thing as highbit()-1.
+ */
+static int
+zfs_log2(uint64_t num)
+{
+ int i = 0;
+
+ while (num > 1) {
+ i++;
+ num = num >> 1;
+ }
+
+ return i;
+}
+
+
+/* Checksum Functions */
+static void
+zio_checksum_off(const void *buf __attribute__ ((unused)),
+ uint64_t size __attribute__ ((unused)),
+ zfs_endian_t endian __attribute__ ((unused)),
+ zio_cksum_t *zcp)
+{
+ ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
+}
+
+/* Checksum Table and Values */
+static zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
+ {NULL, 0, 0, "inherit"},
+ {NULL, 0, 0, "on"},
+ {zio_checksum_off, 0, 0, "off"},
+ {zio_checksum_SHA256, 1, 1, "label"},
+ {zio_checksum_SHA256, 1, 1, "gang_header"},
+ {NULL, 0, 0, "zilog"},
+ {fletcher_2_endian, 0, 0, "fletcher2"},
+ {fletcher_4_endian, 1, 0, "fletcher4"},
+ {zio_checksum_SHA256, 1, 0, "SHA256"},
+ {NULL, 0, 0, "zilog2"},
+};
+
+/*
+ * zio_checksum_verify: Provides support for checksum verification.
+ *
+ * Fletcher2, Fletcher4, and SHA256 are supported.
+ *
+ */
+static int
+zio_checksum_verify(zio_cksum_t zc, uint32_t checksum,
+ zfs_endian_t endian, char *buf, int size)
+{
+ zio_eck_t *zec = (zio_eck_t *) (buf + size) - 1;
+ zio_checksum_info_t *ci = &zio_checksum_table[checksum];
+ zio_cksum_t actual_cksum, expected_cksum;
+
+ if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func == NULL) {
+ printf("zfs unknown checksum function %d\n", checksum);
+ return ZFS_ERR_NOT_IMPLEMENTED_YET;
+ }
+
+ if (ci->ci_eck) {
+ expected_cksum = zec->zec_cksum;
+ zec->zec_cksum = zc;
+ ci->ci_func(buf, size, endian, &actual_cksum);
+ zec->zec_cksum = expected_cksum;
+ zc = expected_cksum;
+ } else {
+ ci->ci_func(buf, size, endian, &actual_cksum);
+ }
+
+ if ((actual_cksum.zc_word[0] != zc.zc_word[0])
+ || (actual_cksum.zc_word[1] != zc.zc_word[1])
+ || (actual_cksum.zc_word[2] != zc.zc_word[2])
+ || (actual_cksum.zc_word[3] != zc.zc_word[3])) {
+ return ZFS_ERR_BAD_FS;
+ }
+
+ return ZFS_ERR_NONE;
+}
+
+/*
+ * vdev_uberblock_compare takes two uberblock structures and returns an integer
+ * indicating the more recent of the two.
+ * Return Value = 1 if ub2 is more recent
+ * Return Value = -1 if ub1 is more recent
+ * The most recent uberblock is determined using its transaction number and
+ * timestamp. The uberblock with the highest transaction number is
+ * considered "newer". If the transaction numbers of the two blocks match, the
+ * timestamps are compared to determine the "newer" of the two.
+ */
+static int
+vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
+{
+ zfs_endian_t ub1_endian, ub2_endian;
+ if (zfs_to_cpu64(ub1->ub_magic, LITTLE_ENDIAN) == UBERBLOCK_MAGIC)
+ ub1_endian = LITTLE_ENDIAN;
+ else
+ ub1_endian = BIG_ENDIAN;
+ if (zfs_to_cpu64(ub2->ub_magic, LITTLE_ENDIAN) == UBERBLOCK_MAGIC)
+ ub2_endian = LITTLE_ENDIAN;
+ else
+ ub2_endian = BIG_ENDIAN;
+
+ if (zfs_to_cpu64(ub1->ub_txg, ub1_endian)
+ < zfs_to_cpu64(ub2->ub_txg, ub2_endian))
+ return -1;
+ if (zfs_to_cpu64(ub1->ub_txg, ub1_endian)
+ > zfs_to_cpu64(ub2->ub_txg, ub2_endian))
+ return 1;
+
+ if (zfs_to_cpu64(ub1->ub_timestamp, ub1_endian)
+ < zfs_to_cpu64(ub2->ub_timestamp, ub2_endian))
+ return -1;
+ if (zfs_to_cpu64(ub1->ub_timestamp, ub1_endian)
+ > zfs_to_cpu64(ub2->ub_timestamp, ub2_endian))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Three pieces of information are needed to verify an uberblock: the magic
+ * number, the version number, and the checksum.
+ *
+ * This implementation checks the magic number, the version number, the
+ * label txg and the label checksum, and finally verifies that the data
+ * referenced by the rootbp is readable.
+ *
+ */
+static int
+uberblock_verify(uberblock_t *uber, int offset, struct zfs_data *data)
+{
+ int err;
+ zfs_endian_t endian = UNKNOWN_ENDIAN;
+ zio_cksum_t zc;
+
+ if (uber->ub_txg < data->label_txg) {
+ debug("ignoring partially written label: uber_txg < label_txg %llu %llu\n",
+ uber->ub_txg, data->label_txg);
+ return ZFS_ERR_BAD_FS;
+ }
+
+ if (zfs_to_cpu64(uber->ub_magic, LITTLE_ENDIAN) == UBERBLOCK_MAGIC
+ && zfs_to_cpu64(uber->ub_version, LITTLE_ENDIAN) > 0
+ && zfs_to_cpu64(uber->ub_version, LITTLE_ENDIAN) <= SPA_VERSION)
+ endian = LITTLE_ENDIAN;
+
+ if (zfs_to_cpu64(uber->ub_magic, BIG_ENDIAN) == UBERBLOCK_MAGIC
+ && zfs_to_cpu64(uber->ub_version, BIG_ENDIAN) > 0
+ && zfs_to_cpu64(uber->ub_version, BIG_ENDIAN) <= SPA_VERSION)
+ endian = BIG_ENDIAN;
+
+ if (endian == UNKNOWN_ENDIAN) {
+ printf("invalid uberblock magic\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ memset(&zc, 0, sizeof(zc));
+ zc.zc_word[0] = cpu_to_zfs64(offset, endian);
+ err = zio_checksum_verify(zc, ZIO_CHECKSUM_LABEL, endian,
+ (char *) uber, UBERBLOCK_SIZE(data->vdev_ashift));
+
+ if (!err) {
+ /* Check that the data pointed to by the rootbp is usable. */
+ void *osp = NULL;
+ size_t ospsize;
+ err = zio_read(&uber->ub_rootbp, endian, &osp, &ospsize, data);
+ free(osp);
+
+ if (!err && ospsize < OBJSET_PHYS_SIZE_V14) {
+ printf("uberblock rootbp points to invalid data\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ }
+
+ return err;
+}
+
+/*
+ * Find the best uberblock.
+ * Return:
+ * Success - Pointer to the best uberblock.
+ * Failure - NULL
+ */
+static uberblock_t *find_bestub(char *ub_array, struct zfs_data *data)
+{
+ const uint64_t sector = data->vdev_phys_sector;
+ uberblock_t *ubbest = NULL;
+ uberblock_t *ubnext;
+ unsigned int i, offset, pickedub = 0;
+ int err = ZFS_ERR_NONE;
+
+ const unsigned int UBCOUNT = UBERBLOCK_COUNT(data->vdev_ashift);
+ const uint64_t UBBYTES = UBERBLOCK_SIZE(data->vdev_ashift);
+
+ for (i = 0; i < UBCOUNT; i++) {
+ ubnext = (uberblock_t *) (i * UBBYTES + ub_array);
+ offset = (sector << SPA_MINBLOCKSHIFT) + VDEV_PHYS_SIZE + (i * UBBYTES);
+
+ err = uberblock_verify(ubnext, offset, data);
+ if (err)
+ continue;
+
+ if (ubbest == NULL || vdev_uberblock_compare(ubnext, ubbest) > 0) {
+ ubbest = ubnext;
+ pickedub = i;
+ }
+ }
+
+ if (ubbest)
+ debug("zfs Found best uberblock at idx %d, txg %llu\n",
+ pickedub, (unsigned long long) ubbest->ub_txg);
+
+ return ubbest;
+}
+
+static inline size_t
+get_psize(blkptr_t *bp, zfs_endian_t endian)
+{
+ return (((zfs_to_cpu64((bp)->blk_prop, endian) >> 16) & 0xffff) + 1)
+ << SPA_MINBLOCKSHIFT;
+}
+
+static uint64_t
+dva_get_offset(dva_t *dva, zfs_endian_t endian)
+{
+ return zfs_to_cpu64((dva)->dva_word[1],
+ endian) << SPA_MINBLOCKSHIFT;
+}
+
+/*
+ * Read a block of data based on the gang block address dva,
+ * and put its data in buf.
+ *
+ */
+static int
+zio_read_gang(blkptr_t *bp, zfs_endian_t endian, dva_t *dva, void *buf,
+ struct zfs_data *data)
+{
+ zio_gbh_phys_t *zio_gb;
+ uint64_t offset, sector;
+ unsigned i;
+ int err;
+ zio_cksum_t zc;
+
+ memset(&zc, 0, sizeof(zc));
+
+ zio_gb = malloc(SPA_GANGBLOCKSIZE);
+ if (!zio_gb)
+ return ZFS_ERR_OUT_OF_MEMORY;
+
+ offset = dva_get_offset(dva, endian);
+ sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
+
+ /* read in the gang block header */
+ err = zfs_devread(sector, 0, SPA_GANGBLOCKSIZE, (char *) zio_gb);
+
+ if (err) {
+ free(zio_gb);
+ return err;
+ }
+
+ /* XXX */
+ /* self-checksumming the gang block header */
+ ZIO_SET_CHECKSUM(&zc, DVA_GET_VDEV(dva),
+ dva_get_offset(dva, endian), bp->blk_birth, 0);
+ err = zio_checksum_verify(zc, ZIO_CHECKSUM_GANG_HEADER, endian,
+ (char *) zio_gb, SPA_GANGBLOCKSIZE);
+ if (err) {
+ free(zio_gb);
+ return err;
+ }
+
+ endian = (zfs_to_cpu64(bp->blk_prop, endian) >> 63) & 1;
+
+ for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
+ if (zio_gb->zg_blkptr[i].blk_birth == 0)
+ continue;
+
+ err = zio_read_data(&zio_gb->zg_blkptr[i], endian, buf, data);
+ if (err) {
+ free(zio_gb);
+ return err;
+ }
+ buf = (char *) buf + get_psize(&zio_gb->zg_blkptr[i], endian);
+ }
+ free(zio_gb);
+ return ZFS_ERR_NONE;
+}
+
+/*
+ * Read in a block of raw data to buf.
+ */
+static int
+zio_read_data(blkptr_t *bp, zfs_endian_t endian, void *buf,
+ struct zfs_data *data)
+{
+ int i, psize;
+ int err = ZFS_ERR_NONE;
+
+ psize = get_psize(bp, endian);
+
+ /* pick a good dva from the block pointer */
+ for (i = 0; i < SPA_DVAS_PER_BP; i++) {
+ uint64_t offset, sector;
+
+ if (bp->blk_dva[i].dva_word[0] == 0 && bp->blk_dva[i].dva_word[1] == 0)
+ continue;
+
+ if ((zfs_to_cpu64(bp->blk_dva[i].dva_word[1], endian)>>63) & 1) {
+ err = zio_read_gang(bp, endian, &bp->blk_dva[i], buf, data);
+ } else {
+ /* read in a data block */
+ offset = dva_get_offset(&bp->blk_dva[i], endian);
+ sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
+
+ err = zfs_devread(sector, 0, psize, buf);
+ }
+
+ if (!err) {
+ /* Check the underlying checksum before we rule this DVA as "good" */
+ uint32_t checkalgo = (zfs_to_cpu64((bp)->blk_prop, endian) >> 40) & 0xff;
+
+ err = zio_checksum_verify(bp->blk_cksum, checkalgo, endian, buf, psize);
+ if (!err)
+ return ZFS_ERR_NONE;
+ }
+
+ /*
+ * If the read failed or the checksum was bad, carry on and hope one of
+ * the remaining DVAs is good.
+ */
+ }
+
+ if (!err) {
+ printf("couldn't find a valid DVA\n");
+ err = ZFS_ERR_BAD_FS;
+ }
+
+ return err;
+}
+
+/*
+ * Read in a block of data, verify its checksum, decompress if needed,
+ * and put the uncompressed data in buf.
+ */
+static int
+zio_read(blkptr_t *bp, zfs_endian_t endian, void **buf,
+ size_t *size, struct zfs_data *data)
+{
+ size_t lsize, psize;
+ unsigned int comp;
+ char *compbuf = NULL;
+ int err;
+
+ *buf = NULL;
+
+ comp = (zfs_to_cpu64((bp)->blk_prop, endian)>>32) & 0xff;
+ lsize = (BP_IS_HOLE(bp) ? 0 :
+ (((zfs_to_cpu64((bp)->blk_prop, endian) & 0xffff) + 1)
+ << SPA_MINBLOCKSHIFT));
+ psize = get_psize(bp, endian);
+
+ if (size)
+ *size = lsize;
+
+ if (comp >= ZIO_COMPRESS_FUNCTIONS) {
+ printf("compression algorithm %u not supported\n", (unsigned int) comp);
+ return ZFS_ERR_NOT_IMPLEMENTED_YET;
+ }
+
+ if (comp != ZIO_COMPRESS_OFF && decomp_table[comp].decomp_func == NULL) {
+ printf("compression algorithm %s not supported\n", decomp_table[comp].name);
+ return ZFS_ERR_NOT_IMPLEMENTED_YET;
+ }
+
+ if (comp != ZIO_COMPRESS_OFF) {
+ compbuf = malloc(psize);
+ if (!compbuf)
+ return ZFS_ERR_OUT_OF_MEMORY;
+ } else {
+ compbuf = *buf = malloc(lsize);
+ }
+
+ err = zio_read_data(bp, endian, compbuf, data);
+ if (err) {
+ free(compbuf);
+ *buf = NULL;
+ return err;
+ }
+
+ if (comp != ZIO_COMPRESS_OFF) {
+ *buf = malloc(lsize);
+ if (!*buf) {
+ free(compbuf);
+ return ZFS_ERR_OUT_OF_MEMORY;
+ }
+
+ err = decomp_table[comp].decomp_func(compbuf, *buf, psize, lsize);
+ free(compbuf);
+ if (err) {
+ free(*buf);
+ *buf = NULL;
+ return err;
+ }
+ }
+
+ return ZFS_ERR_NONE;
+}
+
+/*
+ * Read the block identified by blkid through the dnode's indirect block
+ * tree and return a newly allocated copy of its data in *buf.
+ *
+ */
+static int
+dmu_read(dnode_end_t *dn, uint64_t blkid, void **buf,
+ zfs_endian_t *endian_out, struct zfs_data *data)
+{
+ int idx, level;
+ blkptr_t *bp_array = dn->dn.dn_blkptr;
+ int epbs = dn->dn.dn_indblkshift - SPA_BLKPTRSHIFT;
+ blkptr_t *bp;
+ void *tmpbuf = 0;
+ zfs_endian_t endian;
+ int err = ZFS_ERR_NONE;
+
+ bp = malloc(sizeof(blkptr_t));
+ if (!bp)
+ return ZFS_ERR_OUT_OF_MEMORY;
+
+ endian = dn->endian;
+ for (level = dn->dn.dn_nlevels - 1; level >= 0; level--) {
+ idx = (blkid >> (epbs * level)) & ((1 << epbs) - 1);
+ *bp = bp_array[idx];
+ if (bp_array != dn->dn.dn_blkptr) {
+ free(bp_array);
+ bp_array = 0;
+ }
+
+ if (BP_IS_HOLE(bp)) {
+ size_t size = zfs_to_cpu16(dn->dn.dn_datablkszsec,
+ dn->endian)
+ << SPA_MINBLOCKSHIFT;
+ *buf = malloc(size);
+ if (!*buf) {
+ err = ZFS_ERR_OUT_OF_MEMORY;
+ break;
+ }
+ memset(*buf, 0, size);
+ endian = (zfs_to_cpu64(bp->blk_prop, endian) >> 63) & 1;
+ break;
+ }
+ if (level == 0) {
+ err = zio_read(bp, endian, buf, 0, data);
+ endian = (zfs_to_cpu64(bp->blk_prop, endian) >> 63) & 1;
+ break;
+ }
+ err = zio_read(bp, endian, &tmpbuf, 0, data);
+ endian = (zfs_to_cpu64(bp->blk_prop, endian) >> 63) & 1;
+ if (err)
+ break;
+ bp_array = tmpbuf;
+ }
+ if (bp_array != dn->dn.dn_blkptr)
+ free(bp_array);
+ if (endian_out)
+ *endian_out = endian;
+
+ free(bp);
+ return err;
+}
+
+/*
+ * mzap_lookup: Looks up property described by "name" and returns the value
+ * in "value".
+ */
+static int
+mzap_lookup(mzap_phys_t *zapobj, zfs_endian_t endian,
+ int objsize, char *name, uint64_t * value)
+{
+ int i, chunks;
+ mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
+
+ chunks = objsize / MZAP_ENT_LEN - 1;
+ for (i = 0; i < chunks; i++) {
+ if (strcmp(mzap_ent[i].mze_name, name) == 0) {
+ *value = zfs_to_cpu64(mzap_ent[i].mze_value, endian);
+ return ZFS_ERR_NONE;
+ }
+ }
+
+ printf("couldn't find '%s'\n", name);
+ return ZFS_ERR_FILE_NOT_FOUND;
+}
+
+static int
+mzap_iterate(mzap_phys_t *zapobj, zfs_endian_t endian, int objsize,
+ int (*hook)(const char *name,
+ uint64_t val,
+ struct zfs_data *data),
+ struct zfs_data *data)
+{
+ int i, chunks;
+ mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
+
+ chunks = objsize / MZAP_ENT_LEN - 1;
+ for (i = 0; i < chunks; i++) {
+ if (hook(mzap_ent[i].mze_name,
+ zfs_to_cpu64(mzap_ent[i].mze_value, endian),
+ data))
+ return 1;
+ }
+
+ return 0;
+}
+
+static uint64_t
+zap_hash(uint64_t salt, const char *name)
+{
+ static uint64_t table[256];
+ const uint8_t *cp;
+ uint8_t c;
+ uint64_t crc = salt;
+
+ if (table[128] == 0) {
+ uint64_t *ct;
+ int i, j;
+ for (i = 0; i < 256; i++) {
+ for (ct = table + i, *ct = i, j = 8; j > 0; j--)
+ *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
+ }
+ }
+
+ for (cp = (const uint8_t *) name; (c = *cp) != '\0'; cp++)
+ crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];
+
+ /*
+ * Only use 28 bits, since we need 4 bits in the cookie for the
+ * collision differentiator. We MUST use the high bits, since
+ * those are the ones that we first pay attention to when
+ * choosing the bucket.
+ */
+ crc &= ~((1ULL << (64 - ZAP_HASHBITS)) - 1);
+
+ return crc;
+}
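+
+/*
+ * Illustrative note, not part of the original source: zap_hash() keeps only
+ * the top ZAP_HASHBITS (28) bits of the CRC, and ZAP_HASH_IDX(hash, n)
+ * defined above is simply hash >> (64 - n), so a pointer table of 2^n
+ * buckets is indexed by the highest n of those bits; e.g. with
+ * zt_shift == 10 the bucket index is hash >> 54.
+ */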
+
+/*
+ * Only to be used on 8-bit arrays.
+ * array_len is actual len in bytes (not encoded le_value_length).
+ * buf is null-terminated.
+ */
+/* XXX */
+static int
+zap_leaf_array_equal(zap_leaf_phys_t *l, zfs_endian_t endian,
+ int blksft, int chunk, int array_len, const char *buf)
+{
+ int bseen = 0;
+
+ while (bseen < array_len) {
+ struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
+ int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
+
+ if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
+ return 0;
+
+ if (memcmp(la->la_array, buf + bseen, toread) != 0)
+ break;
+ chunk = zfs_to_cpu16(la->la_next, endian);
+ bseen += toread;
+ }
+ return (bseen == array_len);
+}
+
+/* XXX */
+static int
+zap_leaf_array_get(zap_leaf_phys_t *l, zfs_endian_t endian, int blksft,
+ int chunk, int array_len, char *buf)
+{
+ int bseen = 0;
+
+ while (bseen < array_len) {
+ struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
+ int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
+
+ if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
+ /* Don't use errno because this error is to be ignored. */
+ return ZFS_ERR_BAD_FS;
+
+ memcpy(buf + bseen, la->la_array, toread);
+ chunk = zfs_to_cpu16(la->la_next, endian);
+ bseen += toread;
+ }
+ return ZFS_ERR_NONE;
+}
+
+
+/*
+ * Given a zap_leaf_phys_t, walk through the zap leaf chunks to get the
+ * value for the property "name".
+ *
+ */
+/* XXX */
+static int
+zap_leaf_lookup(zap_leaf_phys_t *l, zfs_endian_t endian,
+ int blksft, uint64_t h,
+ const char *name, uint64_t *value)
+{
+ uint16_t chunk;
+ struct zap_leaf_entry *le;
+
+ /* Verify if this is a valid leaf block */
+ if (zfs_to_cpu64(l->l_hdr.lh_block_type, endian) != ZBT_LEAF) {
+ printf("invalid leaf type\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ if (zfs_to_cpu32(l->l_hdr.lh_magic, endian) != ZAP_LEAF_MAGIC) {
+ printf("invalid leaf magic\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ for (chunk = zfs_to_cpu16(l->l_hash[LEAF_HASH(blksft, h)], endian);
+ chunk != CHAIN_END; chunk = le->le_next) {
+
+ if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft)) {
+ printf("invalid chunk number\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ le = ZAP_LEAF_ENTRY(l, blksft, chunk);
+
+ /* Verify the chunk entry */
+ if (le->le_type != ZAP_CHUNK_ENTRY) {
+ printf("invalid chunk entry\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ if (zfs_to_cpu64(le->le_hash, endian) != h)
+ continue;
+
+ if (zap_leaf_array_equal(l, endian, blksft,
+ zfs_to_cpu16(le->le_name_chunk, endian),
+ zfs_to_cpu16(le->le_name_length, endian),
+ name)) {
+ struct zap_leaf_array *la;
+
+ if (le->le_int_size != 8 || le->le_value_length != 1) {
+ printf("invalid leaf chunk entry\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ /* get the uint64_t property value */
+ la = &ZAP_LEAF_CHUNK(l, blksft, le->le_value_chunk).l_array;
+
+ *value = be64_to_cpu(la->la_array64);
+
+ return ZFS_ERR_NONE;
+ }
+ }
+
+ printf("couldn't find '%s'\n", name);
+ return ZFS_ERR_FILE_NOT_FOUND;
+}
+
+
+/* Verify if this is a fat zap header block */
+static int
+zap_verify(zap_phys_t *zap)
+{
+ if (zap->zap_magic != (uint64_t) ZAP_MAGIC) {
+ printf("bad ZAP magic\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ if (zap->zap_flags != 0) {
+ printf("bad ZAP flags\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ if (zap->zap_salt == 0) {
+ printf("bad ZAP salt\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ return ZFS_ERR_NONE;
+}
+
+/*
+ * Fat ZAP lookup
+ *
+ */
+/* XXX */
+static int
+fzap_lookup(dnode_end_t *zap_dnode, zap_phys_t *zap,
+ char *name, uint64_t *value, struct zfs_data *data)
+{
+ void *l;
+ uint64_t hash, idx, blkid;
+ int blksft = zfs_log2(zfs_to_cpu16(zap_dnode->dn.dn_datablkszsec,
+ zap_dnode->endian) << DNODE_SHIFT);
+ int err;
+ zfs_endian_t leafendian;
+
+ err = zap_verify(zap);
+ if (err)
+ return err;
+
+ hash = zap_hash(zap->zap_salt, name);
+
+ /* get block id from index */
+ if (zap->zap_ptrtbl.zt_numblks != 0) {
+ printf("external pointer tables not supported\n");
+ return ZFS_ERR_NOT_IMPLEMENTED_YET;
+ }
+ idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
+ blkid = ((uint64_t *) zap)[idx + (1 << (blksft - 3 - 1))];
+
+ /* Get the leaf block */
+ if ((1U << blksft) < sizeof(zap_leaf_phys_t)) {
+ printf("ZAP leaf is too small\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ err = dmu_read(zap_dnode, blkid, &l, &leafendian, data);
+ if (err)
+ return err;
+
+ err = zap_leaf_lookup(l, leafendian, blksft, hash, name, value);
+ free(l);
+ return err;
+}
+
+/* XXX */
+static int
+fzap_iterate(dnode_end_t *zap_dnode, zap_phys_t *zap,
+ int (*hook)(const char *name,
+ uint64_t val,
+ struct zfs_data *data),
+ struct zfs_data *data)
+{
+ zap_leaf_phys_t *l;
+ void *l_in;
+ uint64_t idx, blkid;
+ uint16_t chunk;
+ int blksft = zfs_log2(zfs_to_cpu16(zap_dnode->dn.dn_datablkszsec,
+ zap_dnode->endian) << DNODE_SHIFT);
+ int err;
+ zfs_endian_t endian;
+
+ if (zap_verify(zap))
+ return 0;
+
+ /* get block id from index */
+ if (zap->zap_ptrtbl.zt_numblks != 0) {
+ printf("external pointer tables not supported\n");
+ return 0;
+ }
+ /* Get the leaf block */
+ if ((1U << blksft) < sizeof(zap_leaf_phys_t)) {
+ printf("ZAP leaf is too small\n");
+ return 0;
+ }
+ for (idx = 0; idx < zap->zap_ptrtbl.zt_numblks; idx++) {
+ blkid = ((uint64_t *) zap)[idx + (1 << (blksft - 3 - 1))];
+
+ err = dmu_read(zap_dnode, blkid, &l_in, &endian, data);
+ l = l_in;
+ if (err)
+ continue;
+
+ /* Verify if this is a valid leaf block */
+ if (zfs_to_cpu64(l->l_hdr.lh_block_type, endian) != ZBT_LEAF) {
+ free(l);
+ continue;
+ }
+ if (zfs_to_cpu32(l->l_hdr.lh_magic, endian) != ZAP_LEAF_MAGIC) {
+ free(l);
+ continue;
+ }
+
+ for (chunk = 0; chunk < ZAP_LEAF_NUMCHUNKS(blksft); chunk++) {
+ char *buf;
+ struct zap_leaf_array *la;
+ struct zap_leaf_entry *le;
+ uint64_t val;
+ le = ZAP_LEAF_ENTRY(l, blksft, chunk);
+
+ /* Verify the chunk entry */
+ if (le->le_type != ZAP_CHUNK_ENTRY)
+ continue;
+
+ buf = malloc(zfs_to_cpu16(le->le_name_length, endian)
+ + 1);
+ if (zap_leaf_array_get(l, endian, blksft, le->le_name_chunk,
+ le->le_name_length, buf)) {
+ free(buf);
+ continue;
+ }
+ buf[le->le_name_length] = 0;
+
+ if (le->le_int_size != 8
+ || zfs_to_cpu16(le->le_value_length, endian) != 1)
+ continue;
+
+ /* get the uint64_t property value */
+ la = &ZAP_LEAF_CHUNK(l, blksft, le->le_value_chunk).l_array;
+ val = be64_to_cpu(la->la_array64);
+ if (hook(buf, val, data))
+ return 1;
+ free(buf);
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * Read in the data of a zap object and find the value for a matching
+ * property name.
+ *
+ */
+static int
+zap_lookup(dnode_end_t *zap_dnode, char *name, uint64_t *val,
+ struct zfs_data *data)
+{
+ uint64_t block_type;
+ int size;
+ void *zapbuf;
+ int err;
+ zfs_endian_t endian;
+
+ /* Read in the first block of the zap object data. */
+ size = zfs_to_cpu16(zap_dnode->dn.dn_datablkszsec,
+ zap_dnode->endian) << SPA_MINBLOCKSHIFT;
+ err = dmu_read(zap_dnode, 0, &zapbuf, &endian, data);
+ if (err)
+ return err;
+ block_type = zfs_to_cpu64(*((uint64_t *) zapbuf), endian);
+
+ if (block_type == ZBT_MICRO) {
+ err = (mzap_lookup(zapbuf, endian, size, name, val));
+ free(zapbuf);
+ return err;
+ } else if (block_type == ZBT_HEADER) {
+ /* this is a fat zap */
+ err = (fzap_lookup(zap_dnode, zapbuf, name, val, data));
+ free(zapbuf);
+ return err;
+ }
+
+ printf("unknown ZAP type\n");
+ return ZFS_ERR_BAD_FS;
+}
+
+static int
+zap_iterate(dnode_end_t *zap_dnode,
+ int (*hook)(const char *name, uint64_t val,
+ struct zfs_data *data),
+ struct zfs_data *data)
+{
+ uint64_t block_type;
+ int size;
+ void *zapbuf;
+ int err;
+ int ret;
+ zfs_endian_t endian;
+
+ /* Read in the first block of the zap object data. */
+ size = zfs_to_cpu16(zap_dnode->dn.dn_datablkszsec, zap_dnode->endian) << SPA_MINBLOCKSHIFT;
+ err = dmu_read(zap_dnode, 0, &zapbuf, &endian, data);
+ if (err)
+ return 0;
+ block_type = zfs_to_cpu64(*((uint64_t *) zapbuf), endian);
+
+ if (block_type == ZBT_MICRO) {
+ ret = mzap_iterate(zapbuf, endian, size, hook, data);
+ free(zapbuf);
+ return ret;
+ } else if (block_type == ZBT_HEADER) {
+ /* this is a fat zap */
+ ret = fzap_iterate(zap_dnode, zapbuf, hook, data);
+ free(zapbuf);
+ return ret;
+ }
+ printf("unknown ZAP type\n");
+ return 0;
+}
+
+
+/*
+ * Get the dnode of an object number from the metadnode of an object set.
+ *
+ * Input
+ * mdn - metadnode to get the object dnode
+ * objnum - object number for the object dnode
+ * buf - data buffer that holds the returning dnode
+ */
+static int
+dnode_get(dnode_end_t *mdn, uint64_t objnum, uint8_t type,
+ dnode_end_t *buf, struct zfs_data *data)
+{
+ uint64_t blkid, blksz; /* the block id this object dnode is in */
+ int epbs; /* shift of number of dnodes in a block */
+ int idx; /* index within a block */
+ void *dnbuf;
+ int err;
+ zfs_endian_t endian;
+
+ blksz = zfs_to_cpu16(mdn->dn.dn_datablkszsec,
+ mdn->endian) << SPA_MINBLOCKSHIFT;
+
+ epbs = zfs_log2(blksz) - DNODE_SHIFT;
+ blkid = objnum >> epbs;
+ idx = objnum & ((1 << epbs) - 1);
+
+ if (data->dnode_buf != NULL && memcmp(data->dnode_mdn, mdn,
+ sizeof(*mdn)) == 0
+ && objnum >= data->dnode_start && objnum < data->dnode_end) {
+ memmove(&(buf->dn), &(data->dnode_buf)[idx], DNODE_SIZE);
+ buf->endian = data->dnode_endian;
+ if (type && buf->dn.dn_type != type) {
+ printf("incorrect dnode type: %02X != %02x\n", buf->dn.dn_type, type);
+ return ZFS_ERR_BAD_FS;
+ }
+ return ZFS_ERR_NONE;
+ }
+
+ err = dmu_read(mdn, blkid, &dnbuf, &endian, data);
+ if (err)
+ return err;
+
+ free(data->dnode_buf);
+ free(data->dnode_mdn);
+ data->dnode_mdn = malloc(sizeof(*mdn));
+ if (!data->dnode_mdn) {
+ data->dnode_buf = 0;
+ } else {
+ memcpy(data->dnode_mdn, mdn, sizeof(*mdn));
+ data->dnode_buf = dnbuf;
+ data->dnode_start = blkid << epbs;
+ data->dnode_end = (blkid + 1) << epbs;
+ data->dnode_endian = endian;
+ }
+
+ memmove(&(buf->dn), (dnode_phys_t *) dnbuf + idx, DNODE_SIZE);
+ buf->endian = endian;
+ if (type && buf->dn.dn_type != type) {
+ printf("incorrect dnode type\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ return ZFS_ERR_NONE;
+}
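+
+/*
+ * Worked example (illustrative only): with a 16 KiB metadnode block
+ * (dn_datablkszsec == 32, so blksz == 32 << SPA_MINBLOCKSHIFT == 16384)
+ * and 512-byte dnodes (DNODE_SHIFT == 9), epbs = 14 - 9 = 5, i.e. 32 dnodes
+ * per block; object number 100 then lives at blkid = 100 >> 5 = 3,
+ * idx = 100 & 31 = 4.
+ */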
+
+/*
+ * Get the file dnode for a given file name where mdn is the meta dnode
+ * for this ZFS object set. When found, place the file dnode in dn.
+ * The 'path' argument will be mangled.
+ *
+ */
+static int
+dnode_get_path(dnode_end_t *mdn, const char *path_in, dnode_end_t *dn,
+ struct zfs_data *data)
+{
+ uint64_t objnum, version;
+ char *cname, ch;
+ int err = ZFS_ERR_NONE;
+ char *path, *path_buf;
+ struct dnode_chain {
+ struct dnode_chain *next;
+ dnode_end_t dn;
+ };
+ struct dnode_chain *dnode_path = 0, *dn_new, *root;
+
+ dn_new = malloc(sizeof(*dn_new));
+ if (!dn_new)
+ return ZFS_ERR_OUT_OF_MEMORY;
+ dn_new->next = 0;
+ dnode_path = root = dn_new;
+
+ err = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
+ &(dnode_path->dn), data);
+ if (err) {
+ free(dn_new);
+ return err;
+ }
+
+ err = zap_lookup(&(dnode_path->dn), ZPL_VERSION_STR, &version, data);
+ if (err) {
+ free(dn_new);
+ return err;
+ }
+ if (version > ZPL_VERSION) {
+ free(dn_new);
+ printf("too new ZPL version\n");
+ return ZFS_ERR_NOT_IMPLEMENTED_YET;
+ }
+
+ err = zap_lookup(&(dnode_path->dn), ZFS_ROOT_OBJ, &objnum, data);
+ if (err) {
+ free(dn_new);
+ return err;
+ }
+
+ err = dnode_get(mdn, objnum, 0, &(dnode_path->dn), data);
+ if (err) {
+ free(dn_new);
+ return err;
+ }
+
+ path = path_buf = strdup(path_in);
+ if (!path_buf) {
+ free(dn_new);
+ return ZFS_ERR_OUT_OF_MEMORY;
+ }
+
+ while (1) {
+ /* skip leading slashes */
+ while (*path == '/')
+ path++;
+ if (!*path)
+ break;
+ /* get the next component name */
+ cname = path;
+ while (*path && *path != '/')
+ path++;
+ /* Skip dot. */
+ if (cname + 1 == path && cname[0] == '.')
+ continue;
+ /* Handle double dot. */
+ if (cname + 2 == path && cname[0] == '.' && cname[1] == '.') {
+ if (dn_new->next) {
+ dn_new = dnode_path;
+ dnode_path = dn_new->next;
+ free(dn_new);
+ } else {
+ printf("can't resolve ..\n");
+ err = ZFS_ERR_FILE_NOT_FOUND;
+ break;
+ }
+ continue;
+ }
+
+ ch = *path;
+ *path = 0; /* temporarily terminate the current path component */
+
+ if (dnode_path->dn.dn.dn_type != DMU_OT_DIRECTORY_CONTENTS) {
+ printf("not a directory\n");
+ err = ZFS_ERR_BAD_FILE_TYPE;
+ break; /* the dnode chain and path_buf are freed below */
+ }
+ err = zap_lookup(&(dnode_path->dn), cname, &objnum, data);
+ if (err)
+ break;
+
+ dn_new = malloc(sizeof(*dn_new));
+ if (!dn_new) {
+ err = ZFS_ERR_OUT_OF_MEMORY;
+ break;
+ }
+ dn_new->next = dnode_path;
+ dnode_path = dn_new;
+
+ objnum = ZFS_DIRENT_OBJ(objnum);
+ err = dnode_get(mdn, objnum, 0, &(dnode_path->dn), data);
+ if (err)
+ break;
+
+ *path = ch;
+ }
+
+ if (!err)
+ memcpy(dn, &(dnode_path->dn), sizeof(*dn));
+
+ while (dnode_path) {
+ dn_new = dnode_path->next;
+ free(dnode_path);
+ dnode_path = dn_new;
+ }
+ free(path_buf);
+ return err;
+}
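+
+/*
+ * Illustrative walk for path_in == "/boot/uImage" (hypothetical path): after
+ * the master-node and root-directory lookups, the loop above resolves "boot"
+ * and then "uImage" with zap_lookup() in the current directory ZAP. The
+ * returned directory-entry value carries type information in its high bits,
+ * which is why ZFS_DIRENT_OBJ() masks it down to the plain object number
+ * before the dnode_get() call.
+ */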
+
+
+/*
+ * Given the MOS metadnode, find the DSL directory dnode of the file system
+ * named by fsname (e.g. pool/rootfs) by walking the DSL child-directory
+ * ZAPs, and return it in mdn.
+ *
+ * If fsname is empty, the DSL directory dnode of the pool's root dataset
+ * is returned.
+ */
+static int
+get_filesystem_dnode(dnode_end_t *mosmdn, char *fsname,
+ dnode_end_t *mdn, struct zfs_data *data)
+{
+ uint64_t objnum;
+ int err;
+
+ err = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_OT_OBJECT_DIRECTORY, mdn, data);
+ if (err)
+ return err;
+
+ err = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum, data);
+ if (err)
+ return err;
+
+ err = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, data);
+ if (err)
+ return err;
+
+ while (*fsname) {
+ uint64_t childobj;
+ char *cname, ch;
+
+ while (*fsname == '/')
+ fsname++;
+
+ if (!*fsname || *fsname == '@')
+ break;
+
+ cname = fsname;
+ while (*fsname && !isspace(*fsname) && *fsname != '/')
+ fsname++;
+ ch = *fsname;
+ *fsname = 0;
+
+ childobj = zfs_to_cpu64((((dsl_dir_phys_t *) DN_BONUS(&mdn->dn)))->dd_child_dir_zapobj, mdn->endian);
+ err = dnode_get(mosmdn, childobj,
+ DMU_OT_DSL_DIR_CHILD_MAP, mdn, data);
+ if (err)
+ return err;
+
+ err = zap_lookup(mdn, cname, &objnum, data);
+ if (err)
+ return err;
+
+ err = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, data);
+ if (err)
+ return err;
+
+ *fsname = ch;
+ }
+ return ZFS_ERR_NONE;
+}
+
+static int
+make_mdn(dnode_end_t *mdn, struct zfs_data *data)
+{
+ void *osp;
+ blkptr_t *bp;
+ size_t ospsize;
+ int err;
+
+ bp = &(((dsl_dataset_phys_t *) DN_BONUS(&mdn->dn))->ds_bp);
+ err = zio_read(bp, mdn->endian, &osp, &ospsize, data);
+ if (err)
+ return err;
+ if (ospsize < OBJSET_PHYS_SIZE_V14) {
+ free(osp);
+ printf("too small osp\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ mdn->endian = (zfs_to_cpu64(bp->blk_prop, mdn->endian)>>63) & 1;
+ memmove((char *) &(mdn->dn),
+ (char *) &((objset_phys_t *) osp)->os_meta_dnode, DNODE_SIZE);
+ free(osp);
+ return ZFS_ERR_NONE;
+}
+
+static int
+dnode_get_fullpath(const char *fullpath, dnode_end_t *mdn,
+ uint64_t *mdnobj, dnode_end_t *dn, int *isfs,
+ struct zfs_data *data)
+{
+ char *fsname, *snapname;
+ const char *ptr_at, *filename;
+ uint64_t headobj;
+ int err;
+
+ ptr_at = strchr(fullpath, '@');
+ if (!ptr_at) {
+ *isfs = 1;
+ filename = 0;
+ snapname = 0;
+ fsname = strdup(fullpath);
+ if (!fsname)
+ return ZFS_ERR_OUT_OF_MEMORY;
+ } else {
+ const char *ptr_slash = strchr(ptr_at, '/');
+
+ *isfs = 0;
+ fsname = malloc(ptr_at - fullpath + 1);
+ if (!fsname)
+ return ZFS_ERR_OUT_OF_MEMORY;
+ memcpy(fsname, fullpath, ptr_at - fullpath);
+ fsname[ptr_at - fullpath] = 0;
+ if (ptr_at[1] && ptr_at[1] != '/') {
+ snapname = malloc(ptr_slash - ptr_at);
+ if (!snapname) {
+ free(fsname);
+ return ZFS_ERR_OUT_OF_MEMORY;
+ }
+ memcpy(snapname, ptr_at + 1, ptr_slash - ptr_at - 1);
+ snapname[ptr_slash - ptr_at - 1] = 0;
+ } else {
+ snapname = 0;
+ }
+ if (ptr_slash)
+ filename = ptr_slash;
+ else
+ filename = "/";
+ printf("zfs fsname = '%s' snapname='%s' filename = '%s'\n",
+ fsname, snapname, filename);
+ }
+
+
+ err = get_filesystem_dnode(&(data->mos), fsname, dn, data);
+
+ if (err) {
+ free(fsname);
+ free(snapname);
+ return err;
+ }
+
+ headobj = zfs_to_cpu64(((dsl_dir_phys_t *) DN_BONUS(&dn->dn))->dd_head_dataset_obj, dn->endian);
+
+ err = dnode_get(&(data->mos), headobj, DMU_OT_DSL_DATASET, mdn, data);
+ if (err) {
+ free(fsname);
+ free(snapname);
+ return err;
+ }
+
+ if (snapname) {
+ uint64_t snapobj;
+
+ snapobj = zfs_to_cpu64(((dsl_dataset_phys_t *) DN_BONUS(&mdn->dn))->ds_snapnames_zapobj, mdn->endian);
+
+ err = dnode_get(&(data->mos), snapobj,
+ DMU_OT_DSL_DS_SNAP_MAP, mdn, data);
+ if (!err)
+ err = zap_lookup(mdn, snapname, &headobj, data);
+ if (!err)
+ err = dnode_get(&(data->mos), headobj, DMU_OT_DSL_DATASET, mdn, data);
+ if (err) {
+ free(fsname);
+ free(snapname);
+ return err;
+ }
+ }
+
+ if (mdnobj)
+ *mdnobj = headobj;
+
+ make_mdn(mdn, data);
+
+ if (*isfs) {
+ free(fsname);
+ free(snapname);
+ return ZFS_ERR_NONE;
+ }
+ err = dnode_get_path(mdn, filename, dn, data);
+ free(fsname);
+ free(snapname);
+ return err;
+}
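+
+/*
+ * Illustrative split for fullpath == "rpool/ROOT@snap1/boot/uImage"
+ * (hypothetical names): fsname becomes "rpool/ROOT", snapname becomes
+ * "snap1" and filename becomes "/boot/uImage". With no '@' in fullpath
+ * the whole string is treated as a filesystem name and *isfs is set.
+ */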
+
+/*
+ * Search a given XDR-packed nvlist for a name/value pair of the requested
+ * type. The 4-byte encoding header is verified first, then the nvpair list
+ * is walked.
+ *
+ * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
+ *
+ * encoding method/host endian (4 bytes)
+ * nvl_version (4 bytes)
+ * nvl_nvflag (4 bytes)
+ * encoded nvpairs:
+ * encoded size of the nvpair (4 bytes)
+ * decoded size of the nvpair (4 bytes)
+ * name string size (4 bytes)
+ * name string data (sizeof(NV_ALIGN4(string)))
+ * data type (4 bytes)
+ * # of elements in the nvpair (4 bytes)
+ * data
+ * 2 zeros for the last nvpair
+ * (end of the entire list) (8 bytes)
+ */
+
+static int
+nvlist_find_value(char *nvlist, char *name, int valtype, char **val,
+ size_t *size_out, size_t *nelm_out)
+{
+ int name_len, type, encode_size;
+ char *nvpair, *nvp_name;
+
+ /* Verify that the first two bytes of the nvlist (encoding method and
+ endianness) are valid. NOTE: regardless of the endianness the header
+ announces, all subsequent values are encoded big-endian (XDR). */
+ if (nvlist[0] != NV_ENCODE_XDR || (nvlist[1] != NV_LITTLE_ENDIAN
+ && nvlist[1] != NV_BIG_ENDIAN)) {
+ printf("zfs incorrect nvlist header\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ /* skip the header, nvl_version, and nvl_nvflag */
+ nvlist = nvlist + 4 * 3;
+ /*
+ * Loop through the nvpair list.
+ * The XDR representation of an integer is in big-endian byte order.
+ */
+ while ((encode_size = be32_to_cpu(*(uint32_t *) nvlist))) {
+ int nelm;
+
+ nvpair = nvlist + 4 * 2; /* skip the encode/decode size */
+
+ name_len = be32_to_cpu(*(uint32_t *) nvpair);
+ nvpair += 4;
+
+ nvp_name = nvpair;
+ nvpair = nvpair + ((name_len + 3) & ~3); /* align */
+
+ type = be32_to_cpu(*(uint32_t *) nvpair);
+ nvpair += 4;
+
+ nelm = be32_to_cpu(*(uint32_t *) nvpair);
+ if (nelm < 1) {
+ printf("empty nvpair\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ nvpair += 4;
+
+ if ((strncmp(nvp_name, name, name_len) == 0) && type == valtype) {
+ *val = nvpair;
+ *size_out = encode_size;
+ if (nelm_out)
+ *nelm_out = nelm;
+ return 1;
+ }
+
+ nvlist += encode_size; /* goto the next nvpair */
+ }
+ return 0;
+}
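+
+/*
+ * Concrete illustration of the nvpair layout walked above (hypothetical
+ * pair named "txg" with a UINT64 value of 42):
+ *
+ * encoded size (4 bytes) = 32
+ * decoded size (4 bytes) (ignored by the code above)
+ * name length (4 bytes) = 3
+ * name, aligned (4 bytes) = 't' 'x' 'g' '\0'
+ * data type (4 bytes) = DATA_TYPE_UINT64
+ * # of elements (4 bytes) = 1
+ * data (8 bytes) = 42 (big-endian)
+ *
+ * nvlist_find_value() returns a pointer to the 8 data bytes, and
+ * zfs_nvlist_lookup_uint64() below byte-swaps them with be64_to_cpu().
+ */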
+
+int
+zfs_nvlist_lookup_uint64(char *nvlist, char *name, uint64_t *out)
+{
+ char *nvpair;
+ size_t size;
+ int found;
+
+ found = nvlist_find_value(nvlist, name, DATA_TYPE_UINT64, &nvpair, &size, 0);
+ if (!found)
+ return 0;
+ if (size < sizeof(uint64_t)) {
+ printf("invalid uint64\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ *out = be64_to_cpu(*(uint64_t *) nvpair);
+ return 1;
+}
+
+char *
+zfs_nvlist_lookup_string(char *nvlist, char *name)
+{
+ char *nvpair;
+ char *ret;
+ size_t slen;
+ size_t size;
+ int found;
+
+ found = nvlist_find_value(nvlist, name, DATA_TYPE_STRING, &nvpair, &size, 0);
+ if (!found)
+ return 0;
+ if (size < 4) {
+ printf("invalid string\n");
+ return 0;
+ }
+ slen = be32_to_cpu(*(uint32_t *) nvpair);
+ if (slen > size - 4)
+ slen = size - 4;
+ ret = malloc(slen + 1);
+ if (!ret)
+ return 0;
+ memcpy(ret, nvpair + 4, slen);
+ ret[slen] = 0;
+ return ret;
+}
+
+char *
+zfs_nvlist_lookup_nvlist(char *nvlist, char *name)
+{
+ char *nvpair;
+ char *ret;
+ size_t size;
+ int found;
+
+ found = nvlist_find_value(nvlist, name, DATA_TYPE_NVLIST, &nvpair,
+ &size, 0);
+ if (!found)
+ return 0;
+ ret = calloc(1, size + 3 * sizeof(uint32_t));
+ if (!ret)
+ return 0;
+ memcpy(ret, nvlist, sizeof(uint32_t));
+
+ memcpy(ret + sizeof(uint32_t), nvpair, size);
+ return ret;
+}
+
+int
+zfs_nvlist_lookup_nvlist_array_get_nelm(char *nvlist, char *name)
+{
+ char *nvpair;
+ size_t nelm, size;
+ int found;
+
+ found = nvlist_find_value(nvlist, name, DATA_TYPE_NVLIST, &nvpair,
+ &size, &nelm);
+ if (!found)
+ return -1;
+ return nelm;
+}
+
+char *
+zfs_nvlist_lookup_nvlist_array(char *nvlist, char *name,
+ size_t index)
+{
+ char *nvpair, *nvpairptr;
+ int found;
+ char *ret;
+ size_t size;
+ unsigned i;
+ size_t nelm;
+
+ found = nvlist_find_value(nvlist, name, DATA_TYPE_NVLIST, &nvpair,
+ &size, &nelm);
+ if (!found)
+ return 0;
+ if (index >= nelm) {
+ printf("trying to lookup past nvlist array\n");
+ return 0;
+ }
+
+ nvpairptr = nvpair;
+
+ for (i = 0; i < index; i++) {
+ uint32_t encode_size;
+
+ /* skip the header, nvl_version, and nvl_nvflag */
+ nvpairptr = nvpairptr + 4 * 2;
+
+ while (nvpairptr < nvpair + size
+ && (encode_size = be32_to_cpu(*(uint32_t *) nvpairptr)))
+ nvlist += encode_size; /* goto the next nvpair */
+
+ nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
+ }
+
+ if (nvpairptr >= nvpair + size
+ || nvpairptr + be32_to_cpu(*(uint32_t *) (nvpairptr + 4 * 2))
+ >= nvpair + size) {
+ printf("incorrect nvlist array\n");
+ return 0;
+ }
+
+ ret = calloc(1, be32_to_cpu(*(uint32_t *) (nvpairptr + 4 * 2))
+ + 3 * sizeof(uint32_t));
+ if (!ret)
+ return 0;
+ memcpy(ret, nvlist, sizeof(uint32_t));
+
+ memcpy(ret + sizeof(uint32_t), nvpairptr, size);
+ return ret;
+}
+
+static int
+int_zfs_fetch_nvlist(struct zfs_data *data, char **nvlist)
+{
+ int err;
+
+ *nvlist = malloc(VDEV_PHYS_SIZE);
+ if (!*nvlist)
+ return ZFS_ERR_OUT_OF_MEMORY;
+
+ /* Read in the vdev name-value pair list (112K). */
+ err = zfs_devread(data->vdev_phys_sector, 0, VDEV_PHYS_SIZE, *nvlist);
+ if (err) {
+ free(*nvlist);
+ *nvlist = 0;
+ return err;
+ }
+ return ZFS_ERR_NONE;
+}
+
+/*
+ * Check the disk label information and retrieve needed vdev name-value pairs.
+ *
+ */
+static int
+check_pool_label(struct zfs_data *data)
+{
+ uint64_t pool_state;
+ char *nvlist; /* for the pool */
+ char *vdevnvlist; /* for the vdev */
+ uint64_t diskguid;
+ uint64_t version;
+ int found;
+ int err;
+
+ err = int_zfs_fetch_nvlist(data, &nvlist);
+ if (err)
+ return err;
+
+ found = zfs_nvlist_lookup_uint64(nvlist, ZPOOL_CONFIG_POOL_STATE,
+ &pool_state);
+ if (!found) {
+ free(nvlist);
+ printf("zfs pool state not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ if (pool_state == POOL_STATE_DESTROYED) {
+ free(nvlist);
+ printf("zpool is marked as destroyed\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ data->label_txg = 0;
+ found = zfs_nvlist_lookup_uint64(nvlist, ZPOOL_CONFIG_POOL_TXG,
+ &data->label_txg);
+ if (!found) {
+ free(nvlist);
+ printf("zfs pool txg not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ /* not an active device */
+ if (data->label_txg == 0) {
+ free(nvlist);
+ printf("zpool is not active\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ found = zfs_nvlist_lookup_uint64(nvlist, ZPOOL_CONFIG_VERSION,
+ &version);
+ if (!found) {
+ free(nvlist);
+ printf("zpool config version not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ if (version > SPA_VERSION) {
+ free(nvlist);
+ printf("SPA version too new %llu > %llu\n",
+ (unsigned long long) version,
+ (unsigned long long) SPA_VERSION);
+ return ZFS_ERR_NOT_IMPLEMENTED_YET;
+ }
+
+ vdevnvlist = zfs_nvlist_lookup_nvlist(nvlist, ZPOOL_CONFIG_VDEV_TREE);
+ if (!vdevnvlist) {
+ free(nvlist);
+ printf("ZFS config vdev tree not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ found = zfs_nvlist_lookup_uint64(vdevnvlist, ZPOOL_CONFIG_ASHIFT,
+ &data->vdev_ashift);
+ free(vdevnvlist);
+ if (!found) {
+ free(nvlist);
+ printf("ZPOOL config ashift not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ found = zfs_nvlist_lookup_uint64(nvlist, ZPOOL_CONFIG_GUID, &diskguid);
+ if (!found) {
+ free(nvlist);
+ printf("ZPOOL config guid not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ found = zfs_nvlist_lookup_uint64(nvlist, ZPOOL_CONFIG_POOL_GUID, &data->pool_guid);
+ if (!found) {
+ free(nvlist);
+ printf("ZPOOL config pool guid not found\n");
+ return ZFS_ERR_BAD_FS;
+ }
+
+ free(nvlist);
+
+ printf("ZFS Pool GUID: %llu (%016llx) Label: GUID: %llu (%016llx), txg: %llu, SPA v%llu, ashift: %llu\n",
+ (unsigned long long) data->pool_guid,
+ (unsigned long long) data->pool_guid,
+ (unsigned long long) diskguid,
+ (unsigned long long) diskguid,
+ (unsigned long long) data->label_txg,
+ (unsigned long long) version,
+ (unsigned long long) data->vdev_ashift);
+
+ return ZFS_ERR_NONE;
+}
+
+/*
+ * vdev_label_start returns the physical disk offset (in bytes) of
+ * label "l".
+ */
+static uint64_t vdev_label_start(uint64_t psize, int l)
+{
+ return (l * sizeof(vdev_label_t) + (l < VDEV_LABELS / 2 ?
+ 0 : psize -
+ VDEV_LABELS * sizeof(vdev_label_t)));
+}
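+
+/*
+ * Worked example (illustrative): with the 256 KiB on-disk vdev_label_t and
+ * VDEV_LABELS == 4, a device of psize == 1 GiB has its labels at
+ *
+ * vdev_label_start(psize, 0) == 0
+ * vdev_label_start(psize, 1) == 256 KiB
+ * vdev_label_start(psize, 2) == psize - 512 KiB
+ * vdev_label_start(psize, 3) == psize - 256 KiB
+ *
+ * i.e. two labels at the front of the device and two backup labels at the
+ * end, which is why zfs_mount() below can skip the trailing pair when the
+ * device size is unknown.
+ */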
+
+void
+zfs_unmount(struct zfs_data *data)
+{
+ free(data->dnode_buf);
+ free(data->dnode_mdn);
+ free(data->file_buf);
+ free(data);
+}
+
+/*
+ * zfs_mount() locates the best valid uberblock of the root pool and reads
+ * the corresponding MOS metadnode into data->mos.
+ */
+struct zfs_data *
+zfs_mount(device_t dev)
+{
+ struct zfs_data *data = 0;
+ int label = 0, bestlabel = -1;
+ char *ub_array;
+ uberblock_t *ubbest;
+ uberblock_t *ubcur = NULL;
+ void *osp = 0;
+ size_t ospsize;
+ int err;
+
+ data = malloc(sizeof(*data));
+ if (!data)
+ return 0;
+ memset(data, 0, sizeof(*data));
+
+ ub_array = malloc(VDEV_UBERBLOCK_RING);
+ if (!ub_array) {
+ zfs_unmount(data);
+ return 0;
+ }
+
+ ubbest = malloc(sizeof(*ubbest));
+ if (!ubbest) {
+ free(ub_array);
+ zfs_unmount(data);
+ return 0;
+ }
+ memset(ubbest, 0, sizeof(*ubbest));
+
+ /*
+ * Some El Torito stacks don't report a size, and we end up setting
+ * the size to MAXUINT; furthermore, some of these devices stop
+ * working once a single read past the end has been issued. Checking
+ * for a missing part_length and skipping the backup labels at the
+ * end of the slice/partition/device avoids breaking down on such
+ * devices.
+ */
+ const int vdevnum =
+ dev->part_length == 0 ?
+ VDEV_LABELS / 2 : VDEV_LABELS;
+
+ /* Size in bytes of the device (disk or partition); aligned below to the label size. */
+ uint64_t device_size =
+ dev->part_length << SECTOR_BITS;
+
+ const uint64_t alignedbytes =
+ P2ALIGN(device_size, (uint64_t) sizeof(vdev_label_t));
+
+ for (label = 0; label < vdevnum; label++) {
+ uint64_t labelstartbytes = vdev_label_start(alignedbytes, label);
+ uint64_t labelstart = labelstartbytes >> SECTOR_BITS;
+
+ debug("zfs reading label %d at sector %llu (byte %llu)\n",
+ label, (unsigned long long) labelstart,
+ (unsigned long long) labelstartbytes);
+
+ data->vdev_phys_sector = labelstart +
+ ((VDEV_SKIP_SIZE + VDEV_BOOT_HEADER_SIZE) >> SECTOR_BITS);
+
+ err = check_pool_label(data);
+ if (err) {
+ printf("zfs error checking label %d\n", label);
+ continue;
+ }
+
+ /* Read in the uberblock ring (128K). */
+ err = zfs_devread(data->vdev_phys_sector +
+ (VDEV_PHYS_SIZE >> SECTOR_BITS),
+ 0, VDEV_UBERBLOCK_RING, ub_array);
+ if (err) {
+ printf("zfs error reading uberblock ring for label %d\n", label);
+ continue;
+ }
+
+ ubcur = find_bestub(ub_array, data);
+ if (!ubcur) {
+ printf("zfs No good uberblocks found in label %d\n", label);
+ continue;
+ }
+
+ if (vdev_uberblock_compare(ubcur, ubbest) > 0) {
+ /* Looks like the block is good, so use it.*/
+ memcpy(ubbest, ubcur, sizeof(*ubbest));
+ bestlabel = label;
+ debug("zfs Current best uberblock found in label %d\n", label);
+ }
+ }
+ free(ub_array);
+
+ /* We zeroed the structure to begin with. If we never assigned to it,
+ the magic will still be zero. */
+ if (!ubbest->ub_magic) {
+ printf("couldn't find a valid ZFS label\n");
+ zfs_unmount(data);
+ free(ubbest);
+ return 0;
+ }
+
+ debug("zfs ubbest %p in label %d\n", ubbest, bestlabel);
+
+ zfs_endian_t ub_endian =
+ zfs_to_cpu64(ubbest->ub_magic, LITTLE_ENDIAN) == UBERBLOCK_MAGIC
+ ? LITTLE_ENDIAN : BIG_ENDIAN;
+
+ debug("zfs endian set to %s\n", !ub_endian ? "big" : "little");
+
+ err = zio_read(&ubbest->ub_rootbp, ub_endian, &osp, &ospsize, data);
+
+ if (err) {
+ printf("couldn't zio_read object directory\n");
+ zfs_unmount(data);
+ free(ubbest);
+ return 0;
+ }
+
+ if (ospsize < OBJSET_PHYS_SIZE_V14) {
+ printf("osp too small\n");
+ zfs_unmount(data);
+ free(osp);
+ free(ubbest);
+ return 0;
+ }
+
+ /* Got the MOS; save its metadnode and endianness in data->mos. */
+ memmove(&(data->mos.dn), &((objset_phys_t *) osp)->os_meta_dnode, DNODE_SIZE);
+ data->mos.endian =
+ (zfs_to_cpu64(ubbest->ub_rootbp.blk_prop, ub_endian) >> 63) & 1;
+ memmove(&(data->current_uberblock), ubbest, sizeof(uberblock_t));
+
+ free(osp);
+ free(ubbest);
+
+ return data;
+}
+
+int
+zfs_fetch_nvlist(device_t dev, char **nvlist)
+{
+ struct zfs_data *zfs;
+ int err;
+
+ zfs = zfs_mount(dev);
+ if (!zfs)
+ return ZFS_ERR_BAD_FS;
+ err = int_zfs_fetch_nvlist(zfs, nvlist);
+ zfs_unmount(zfs);
+ return err;
+}
+
+/*
+ * zfs_open() locates a file in the root pool by walking the MOS and
+ * stores the file's dnode in data->dnode.
+ */
+int
+zfs_open(struct zfs_file *file, const char *fsfilename)
+{
+ struct zfs_data *data;
+ int err;
+ int isfs;
+
+ data = zfs_mount(file->device);
+ if (!data)
+ return ZFS_ERR_BAD_FS;
+
+ err = dnode_get_fullpath(fsfilename, &(data->mdn), 0,
+ &(data->dnode), &isfs, data);
+ if (err) {
+ zfs_unmount(data);
+ return err;
+ }
+
+ if (isfs) {
+ zfs_unmount(data);
+ printf("Missing @ or / separator\n");
+ return ZFS_ERR_FILE_NOT_FOUND;
+ }
+
+ /* We found the dnode for this file. Verify that it is a plain file. */
+ if (data->dnode.dn.dn_type != DMU_OT_PLAIN_FILE_CONTENTS) {
+ zfs_unmount(data);
+ printf("not a file\n");
+ return ZFS_ERR_BAD_FILE_TYPE;
+ }
+
+ /* get the file size and set the file position to 0 */
+
+ /*
+ * For DMU_OT_SA we will need to locate the SIZE attribute,
+ * which could be either in the bonus buffer or in the "spill" block.
+ */
+ if (data->dnode.dn.dn_bonustype == DMU_OT_SA) {
+ void *sahdrp;
+ int hdrsize;
+
+ if (data->dnode.dn.dn_bonuslen != 0) {
+ sahdrp = (sa_hdr_phys_t *) DN_BONUS(&data->dnode.dn);
+ } else if (data->dnode.dn.dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
+ blkptr_t *bp = &data->dnode.dn.dn_spill;
+
+ err = zio_read(bp, data->dnode.endian, &sahdrp, NULL, data);
+ if (err) {
+ zfs_unmount(data);
+ return err;
+ }
+ } else {
+ printf("filesystem is corrupt\n");
+ zfs_unmount(data);
+ return ZFS_ERR_BAD_FS;
+ }
+
+ hdrsize = SA_HDR_SIZE(((sa_hdr_phys_t *) sahdrp));
+ file->size = *(uint64_t *) ((char *) sahdrp + hdrsize + SA_SIZE_OFFSET);
+ } else {
+ file->size = zfs_to_cpu64(((znode_phys_t *) DN_BONUS(&data->dnode.dn))->zp_size, data->dnode.endian);
+ }
+
+ file->data = data;
+ file->offset = 0;
+
+ return ZFS_ERR_NONE;
+}
+
+uint64_t
+zfs_read(zfs_file_t file, char *buf, uint64_t len)
+{
+ struct zfs_data *data = (struct zfs_data *) file->data;
+ int blksz, movesize;
+ uint64_t length;
+ int64_t red;
+ int err;
+
+ if (data->file_buf == NULL) {
+ data->file_buf = malloc(SPA_MAXBLOCKSIZE);
+ if (!data->file_buf)
+ return -1;
+ data->file_start = data->file_end = 0;
+ }
+
+ /*
+ * If the requested range is already in the cached buffer, copy it out and return.
+ */
+ if (file->offset >= data->file_start
+ && file->offset + len <= data->file_end) {
+ memmove(buf, data->file_buf + file->offset - data->file_start,
+ len);
+ return len;
+ }
+
+ blksz = zfs_to_cpu16(data->dnode.dn.dn_datablkszsec,
+ data->dnode.endian) << SPA_MINBLOCKSHIFT;
+
+ /*
+ * The requested range is not cached. Read the file one data block at
+ * a time; this could be optimized to read in as large a chunk as
+ * there is space available, but for now only one block is read per
+ * iteration.
+ */
+ length = len;
+ red = 0;
+ while (length) {
+ void *t;
+ /*
+ * Find requested blkid and the offset within that block.
+ */
+ uint64_t blkid = file->offset + red;
+ blkid = do_div(blkid, blksz);
+ free(data->file_buf);
+ data->file_buf = 0;
+
+ err = dmu_read(&(data->dnode), blkid, &t,
+ 0, data);
+ data->file_buf = t;
+ if (err)
+ return -1;
+
+ data->file_start = blkid * blksz;
+ data->file_end = data->file_start + blksz;
+
+ movesize = MIN(length, data->file_end - (int) file->offset - red);
+
+ memmove(buf, data->file_buf + file->offset + red
+ - data->file_start, movesize);
+ buf += movesize;
+ length -= movesize;
+ red += movesize;
+ }
+
+ return len;
+}
+
+int
+zfs_close(zfs_file_t file)
+{
+ zfs_unmount((struct zfs_data *) file->data);
+ return ZFS_ERR_NONE;
+}
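+
+/*
+ * Illustrative sketch of how the file API above fits together. The helper
+ * name zfs_cat and the fixed-size buffer are hypothetical, and this assumes
+ * zfs_file_t is a pointer to struct zfs_file; real callers (e.g. the zfs
+ * commands) do their own device setup and error handling.
+ */
+#if 0
+static int zfs_cat(device_t dev, const char *fsfilename)
+{
+ struct zfs_file zfile;
+ char buf[512];
+ uint64_t n, want;
+ int err;
+
+ memset(&zfile, 0, sizeof(zfile));
+ zfile.device = dev;
+
+ err = zfs_open(&zfile, fsfilename); /* e.g. "rpool/ROOT@/boot/uImage" */
+ if (err)
+ return err;
+
+ while (zfile.offset < zfile.size) {
+ want = MIN(sizeof(buf), zfile.size - zfile.offset);
+ n = zfs_read(&zfile, buf, want);
+ if ((int64_t) n < 0)
+ break;
+ /* consume buf[0..n) here */
+ zfile.offset += n;
+ }
+ return zfs_close(&zfile);
+}
+#endif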
+
+int
+zfs_getmdnobj(device_t dev, const char *fsfilename,
+ uint64_t *mdnobj)
+{
+ struct zfs_data *data;
+ int err;
+ int isfs;
+
+ data = zfs_mount(dev);
+ if (!data)
+ return ZFS_ERR_BAD_FS;
+
+ err = dnode_get_fullpath(fsfilename, &(data->mdn), mdnobj,
+ &(data->dnode), &isfs, data);
+ zfs_unmount(data);
+ return err;
+}
+
+static void
+fill_fs_info(struct zfs_dirhook_info *info,
+ dnode_end_t mdn, struct zfs_data *data)
+{
+ int err;
+ dnode_end_t dn;
+ uint64_t objnum;
+ uint64_t headobj;
+
+ memset(info, 0, sizeof(*info));
+
+ info->dir = 1;
+
+ if (mdn.dn.dn_type == DMU_OT_DSL_DIR) {
+ headobj = zfs_to_cpu64(((dsl_dir_phys_t *) DN_BONUS(&mdn.dn))->dd_head_dataset_obj, mdn.endian);
+
+ err = dnode_get(&(data->mos), headobj, DMU_OT_DSL_DATASET, &mdn, data);
+ if (err) {
+ printf("zfs failed here 1\n");
+ return;
+ }
+ }
+ make_mdn(&mdn, data);
+ err = dnode_get(&mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
+ &dn, data);
+ if (err) {
+ printf("zfs failed here 2\n");
+ return;
+ }
+
+ err = zap_lookup(&dn, ZFS_ROOT_OBJ, &objnum, data);
+ if (err) {
+ printf("zfs failed here 3\n");
+ return;
+ }
+
+ err = dnode_get(&mdn, objnum, 0, &dn, data);
+ if (err) {
+ printf("zfs failed here 4\n");
+ return;
+ }
+
+ info->mtimeset = 1;
+ info->mtime = zfs_to_cpu64(((znode_phys_t *) DN_BONUS(&dn.dn))->zp_mtime[0], dn.endian);
+
+ return;
+}
+
+static int iterate_zap(const char *name, uint64_t val, struct zfs_data *data)
+{
+ struct zfs_dirhook_info info;
+ dnode_end_t dn;
+
+ memset(&info, 0, sizeof(info));
+
+ dnode_get(&(data->mdn), val, 0, &dn, data);
+ info.mtimeset = 1;
+ info.mtime = zfs_to_cpu64(((znode_phys_t *) DN_BONUS(&dn.dn))->zp_mtime[0], dn.endian);
+ info.dir = (dn.dn.dn_type == DMU_OT_DIRECTORY_CONTENTS);
+ debug("zfs type=%d, name=%s\n",
+ (int)dn.dn.dn_type, (char *)name);
+ if (!data->userhook)
+ return 0;
+ return data->userhook(name, &info);
+}
+
+static int iterate_zap_fs(const char *name, uint64_t val, struct zfs_data *data)
+{
+ struct zfs_dirhook_info info;
+ dnode_end_t mdn;
+ int err;
+ err = dnode_get(&(data->mos), val, 0, &mdn, data);
+ if (err)
+ return 0;
+ if (mdn.dn.dn_type != DMU_OT_DSL_DIR)
+ return 0;
+
+ fill_fs_info(&info, mdn, data);
+
+ if (!data->userhook)
+ return 0;
+ return data->userhook(name, &info);
+}
+
+static int iterate_zap_snap(const char *name, uint64_t val, struct zfs_data *data)
+{
+ struct zfs_dirhook_info info;
+ char *name2;
+ int ret = 0;
+ dnode_end_t mdn;
+ int err;
+
+ err = dnode_get(&(data->mos), val, 0, &mdn, data);
+ if (err)
+ return 0;
+
+ if (mdn.dn.dn_type != DMU_OT_DSL_DATASET)
+ return 0;
+
+ fill_fs_info(&info, mdn, data);
+
+ name2 = malloc(strlen(name) + 2);
+ if (!name2)
+ return 0;
+ name2[0] = '@';
+ memcpy(name2 + 1, name, strlen(name) + 1);
+ if (data->userhook)
+ ret = data->userhook(name2, &info);
+ free(name2);
+ return ret;
+}
+
+int
+zfs_ls(device_t device, const char *path,
+ int (*hook)(const char *, const struct zfs_dirhook_info *))
+{
+ struct zfs_data *data;
+ int err;
+ int isfs;
+
+ data = zfs_mount(device);
+ if (!data)
+ return ZFS_ERR_BAD_FS;
+
+ data->userhook = hook;
+
+ err = dnode_get_fullpath(path, &(data->mdn), 0, &(data->dnode), &isfs, data);
+ if (err) {
+ zfs_unmount(data);
+ return err;
+ }
+ if (isfs) {
+ uint64_t childobj, headobj;
+ uint64_t snapobj;
+ dnode_end_t dn;
+ struct zfs_dirhook_info info;
+
+ fill_fs_info(&info, data->dnode, data);
+ hook("@", &info);
+
+ childobj = zfs_to_cpu64(((dsl_dir_phys_t *) DN_BONUS(&data->dnode.dn))->dd_child_dir_zapobj, data->dnode.endian);
+ headobj = zfs_to_cpu64(((dsl_dir_phys_t *) DN_BONUS(&data->dnode.dn))->dd_head_dataset_obj, data->dnode.endian);
+ err = dnode_get(&(data->mos), childobj,
+ DMU_OT_DSL_DIR_CHILD_MAP, &dn, data);
+ if (err) {
+ zfs_unmount(data);
+ return err;
+ }
+
+
+ zap_iterate(&dn, iterate_zap_fs, data);
+
+ err = dnode_get(&(data->mos), headobj, DMU_OT_DSL_DATASET, &dn, data);
+ if (err) {
+ zfs_unmount(data);
+ return err;
+ }
+
+ snapobj = zfs_to_cpu64(((dsl_dataset_phys_t *) DN_BONUS(&dn.dn))->ds_snapnames_zapobj, dn.endian);
+
+ err = dnode_get(&(data->mos), snapobj,
+ DMU_OT_DSL_DS_SNAP_MAP, &dn, data);
+ if (err) {
+ zfs_unmount(data);
+ return err;
+ }
+
+ zap_iterate(&dn, iterate_zap_snap, data);
+ } else {
+ if (data->dnode.dn.dn_type != DMU_OT_DIRECTORY_CONTENTS) {
+ zfs_unmount(data);
+ printf("not a directory\n");
+ return ZFS_ERR_BAD_FILE_TYPE;
+ }
+ zap_iterate(&(data->dnode), iterate_zap, data);
+ }
+ zfs_unmount(data);
+ return ZFS_ERR_NONE;
+}
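+
+/*
+ * Illustrative zfs_ls() hook (the helper name print_entry is hypothetical):
+ * the hook receives each entry name together with its zfs_dirhook_info.
+ */
+#if 0
+static int print_entry(const char *name, const struct zfs_dirhook_info *info)
+{
+ printf("%s%s\n", name, info->dir ? "/" : "");
+ return 0;
+}
+
+/* zfs_ls(dev, "rpool/ROOT@/", print_entry); */
+#endif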
diff --git a/qemu/roms/u-boot/fs/zfs/zfs_fletcher.c b/qemu/roms/u-boot/fs/zfs/zfs_fletcher.c
new file mode 100644
index 000000000..d4ddf3b4e
--- /dev/null
+++ b/qemu/roms/u-boot/fs/zfs/zfs_fletcher.c
@@ -0,0 +1,75 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/ctype.h>
+#include <asm/byteorder.h>
+#include "zfs_common.h"
+
+#include <zfs/zfs.h>
+#include <zfs/zio.h>
+#include <zfs/dnode.h>
+#include <zfs/uberblock_impl.h>
+#include <zfs/vdev_impl.h>
+#include <zfs/zio_checksum.h>
+#include <zfs/zap_impl.h>
+#include <zfs/zap_leaf.h>
+#include <zfs/zfs_znode.h>
+#include <zfs/dmu.h>
+#include <zfs/dmu_objset.h>
+#include <zfs/dsl_dir.h>
+#include <zfs/dsl_dataset.h>
+
+void
+fletcher_2_endian(const void *buf, uint64_t size,
+ zfs_endian_t endian,
+ zio_cksum_t *zcp)
+{
+ const uint64_t *ip = buf;
+ const uint64_t *ipend = ip + (size / sizeof(uint64_t));
+ uint64_t a0, b0, a1, b1;
+
+ for (a0 = b0 = a1 = b1 = 0; ip < ipend; ip += 2) {
+ a0 += zfs_to_cpu64(ip[0], endian);
+ a1 += zfs_to_cpu64(ip[1], endian);
+ b0 += a0;
+ b1 += a1;
+ }
+
+ zcp->zc_word[0] = cpu_to_zfs64(a0, endian);
+ zcp->zc_word[1] = cpu_to_zfs64(a1, endian);
+ zcp->zc_word[2] = cpu_to_zfs64(b0, endian);
+ zcp->zc_word[3] = cpu_to_zfs64(b1, endian);
+}
+
+void
+fletcher_4_endian(const void *buf, uint64_t size, zfs_endian_t endian,
+ zio_cksum_t *zcp)
+{
+ const uint32_t *ip = buf;
+ const uint32_t *ipend = ip + (size / sizeof(uint32_t));
+ uint64_t a, b, c, d;
+
+ for (a = b = c = d = 0; ip < ipend; ip++) {
+ a += zfs_to_cpu32(ip[0], endian);
+ b += a;
+ c += b;
+ d += c;
+ }
+
+ zcp->zc_word[0] = cpu_to_zfs64(a, endian);
+ zcp->zc_word[1] = cpu_to_zfs64(b, endian);
+ zcp->zc_word[2] = cpu_to_zfs64(c, endian);
+ zcp->zc_word[3] = cpu_to_zfs64(d, endian);
+}
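+
+/*
+ * Illustrative expansion of the loop above for a buffer of four 32-bit
+ * words w0..w3:
+ *
+ * a = w0 + w1 + w2 + w3
+ * b = 4*w0 + 3*w1 + 2*w2 + 1*w3
+ * c = 10*w0 + 6*w1 + 3*w2 + 1*w3
+ * d = 20*w0 + 10*w1 + 4*w2 + 1*w3
+ *
+ * i.e. running sums of running sums, which keeps the checksum sensitive to
+ * word order and position while needing only 64-bit additions.
+ */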
diff --git a/qemu/roms/u-boot/fs/zfs/zfs_lzjb.c b/qemu/roms/u-boot/fs/zfs/zfs_lzjb.c
new file mode 100644
index 000000000..607dfbb51
--- /dev/null
+++ b/qemu/roms/u-boot/fs/zfs/zfs_lzjb.c
@@ -0,0 +1,85 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/ctype.h>
+#include <asm/byteorder.h>
+#include "zfs_common.h"
+
+#include <zfs/zfs.h>
+#include <zfs/zio.h>
+#include <zfs/dnode.h>
+#include <zfs/uberblock_impl.h>
+#include <zfs/vdev_impl.h>
+#include <zfs/zio_checksum.h>
+#include <zfs/zap_impl.h>
+#include <zfs/zap_leaf.h>
+#include <zfs/zfs_znode.h>
+#include <zfs/dmu.h>
+#include <zfs/dmu_objset.h>
+#include <zfs/dsl_dir.h>
+#include <zfs/dsl_dataset.h>
+
+#define MATCH_BITS 6
+#define MATCH_MIN 3
+#define OFFSET_MASK ((1 << (16 - MATCH_BITS)) - 1)
+
+/*
+ * Decompression Entry - lzjb
+ */
+#ifndef NBBY
+#define NBBY 8
+#endif
+
+int
+lzjb_decompress(void *s_start, void *d_start, uint32_t s_len,
+ uint32_t d_len)
+{
+ uint8_t *src = s_start;
+ uint8_t *dst = d_start;
+ uint8_t *d_end = (uint8_t *) d_start + d_len;
+ uint8_t *s_end = (uint8_t *) s_start + s_len;
+ uint8_t *cpy, copymap = 0;
+ int copymask = 1 << (NBBY - 1);
+
+ while (dst < d_end && src < s_end) {
+ if ((copymask <<= 1) == (1 << NBBY)) {
+ copymask = 1;
+ copymap = *src++;
+ }
+ if (src >= s_end) {
+ printf("lzjb decompression failed\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ if (copymap & copymask) {
+ int mlen = (src[0] >> (NBBY - MATCH_BITS)) + MATCH_MIN;
+ int offset = ((src[0] << NBBY) | src[1]) & OFFSET_MASK;
+ src += 2;
+ cpy = dst - offset;
+ if (src > s_end || cpy < (uint8_t *) d_start) {
+ printf("lzjb decompression failed\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ while (--mlen >= 0 && dst < d_end)
+ *dst++ = *cpy++;
+ } else {
+ *dst++ = *src++;
+ }
+ }
+ if (dst < d_end) {
+ printf("lzjb decompression failed\n");
+ return ZFS_ERR_BAD_FS;
+ }
+ return ZFS_ERR_NONE;
+}
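+
+/*
+ * Illustrative decoding of the LZJB token format handled above: each
+ * copymap byte controls the next 8 items. A set bit introduces a two-byte
+ * copy token whose top MATCH_BITS (6) bits hold the match length minus
+ * MATCH_MIN and whose remaining 10 bits hold the back-reference offset,
+ * so the bytes 0x0c 0x05 mean "copy (0x0c >> 2) + 3 = 6 bytes starting
+ * 5 bytes back"; a clear bit means the next byte is a literal.
+ */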
diff --git a/qemu/roms/u-boot/fs/zfs/zfs_sha256.c b/qemu/roms/u-boot/fs/zfs/zfs_sha256.c
new file mode 100644
index 000000000..bd6b84745
--- /dev/null
+++ b/qemu/roms/u-boot/fs/zfs/zfs_sha256.c
@@ -0,0 +1,136 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/stat.h>
+#include <linux/time.h>
+#include <linux/ctype.h>
+#include <asm/byteorder.h>
+#include "zfs_common.h"
+
+#include <zfs/zfs.h>
+#include <zfs/zio.h>
+#include <zfs/dnode.h>
+#include <zfs/uberblock_impl.h>
+#include <zfs/vdev_impl.h>
+#include <zfs/zio_checksum.h>
+#include <zfs/zap_impl.h>
+#include <zfs/zap_leaf.h>
+#include <zfs/zfs_znode.h>
+#include <zfs/dmu.h>
+#include <zfs/dmu_objset.h>
+#include <zfs/dsl_dir.h>
+#include <zfs/dsl_dataset.h>
+
+/*
+ * SHA-256 checksum, as specified in FIPS 180-2, available at:
+ * http://csrc.nist.gov/cryptval
+ *
+ * This is a very compact implementation of SHA-256.
+ * It is designed to be simple and portable, not to be fast.
+ */
+
+/*
+ * The literal definitions according to FIPS180-2 would be:
+ *
+ * Ch(x, y, z) (((x) & (y)) ^ ((~(x)) & (z)))
+ * Maj(x, y, z) (((x) & (y)) | ((x) & (z)) | ((y) & (z)))
+ *
+ * We use logical equivalents which require one less op.
+ */
+#define Ch(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define Maj(x, y, z) (((x) & (y)) ^ ((z) & ((x) ^ (y))))
+#define Rot32(x, s) (((x) >> s) | ((x) << (32 - s)))
+#define SIGMA0(x) (Rot32(x, 2) ^ Rot32(x, 13) ^ Rot32(x, 22))
+#define SIGMA1(x) (Rot32(x, 6) ^ Rot32(x, 11) ^ Rot32(x, 25))
+#define sigma0(x) (Rot32(x, 7) ^ Rot32(x, 18) ^ ((x) >> 3))
+#define sigma1(x) (Rot32(x, 17) ^ Rot32(x, 19) ^ ((x) >> 10))
+
+static const uint32_t SHA256_K[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
+ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
+ 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
+ 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
+ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
+ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
+ 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
+ 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
+ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
+ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
+ 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+static void
+SHA256Transform(uint32_t *H, const uint8_t *cp)
+{
+ uint32_t a, b, c, d, e, f, g, h, t, T1, T2, W[64];
+
+ for (t = 0; t < 16; t++, cp += 4)
+ W[t] = (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | cp[3];
+
+ for (t = 16; t < 64; t++)
+ W[t] = sigma1(W[t - 2]) + W[t - 7] +
+ sigma0(W[t - 15]) + W[t - 16];
+
+ a = H[0]; b = H[1]; c = H[2]; d = H[3];
+ e = H[4]; f = H[5]; g = H[6]; h = H[7];
+
+ for (t = 0; t < 64; t++) {
+ T1 = h + SIGMA1(e) + Ch(e, f, g) + SHA256_K[t] + W[t];
+ T2 = SIGMA0(a) + Maj(a, b, c);
+ h = g; g = f; f = e; e = d + T1;
+ d = c; c = b; b = a; a = T1 + T2;
+ }
+
+ H[0] += a; H[1] += b; H[2] += c; H[3] += d;
+ H[4] += e; H[5] += f; H[6] += g; H[7] += h;
+}
+
+void
+zio_checksum_SHA256(const void *buf, uint64_t size,
+ zfs_endian_t endian, zio_cksum_t *zcp)
+{
+ uint32_t H[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
+ 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
+ uint8_t pad[128];
+ unsigned padsize = size & 63;
+ unsigned i;
+
+ for (i = 0; i < size - padsize; i += 64)
+ SHA256Transform(H, (uint8_t *)buf + i);
+
+ for (i = 0; i < padsize; i++)
+ pad[i] = ((uint8_t *)buf)[i];
+
+ for (pad[padsize++] = 0x80; (padsize & 63) != 56; padsize++)
+ pad[padsize] = 0;
+
+ for (i = 0; i < 8; i++)
+ pad[padsize++] = (size << 3) >> (56 - 8 * i);
+
+ for (i = 0; i < padsize; i += 64)
+ SHA256Transform(H, pad + i);
+
+ zcp->zc_word[0] = cpu_to_zfs64((uint64_t)H[0] << 32 | H[1],
+ endian);
+ zcp->zc_word[1] = cpu_to_zfs64((uint64_t)H[2] << 32 | H[3],
+ endian);
+ zcp->zc_word[2] = cpu_to_zfs64((uint64_t)H[4] << 32 | H[5],
+ endian);
+ zcp->zc_word[3] = cpu_to_zfs64((uint64_t)H[6] << 32 | H[7],
+ endian);
+}
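+
+/*
+ * Worked example of the padding above (illustrative): for size == 100, one
+ * full 64-byte block is transformed first, then the 36-byte tail is copied
+ * into pad[], followed by the 0x80 marker, zero bytes up to offset 56, and
+ * the 64-bit big-endian bit count 800, giving exactly one more 64-byte
+ * block to transform.
+ */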