{
unsigned char status;
int ncycles;
- unsigned char *buffer;
if(n_tr < 1)
return 0;
buffer++;
}
} else {
- unsigned short *buffer = tr->buffer;
+ unsigned short *buffer = (unsigned short *)tr->buffer;
ncycles = tr->nbytes >> 1;
while (ncycles> 0) {
buffer++;
}
} else {
- unsigned short *buffer = tr->buffer;
+ unsigned short *buffer = (unsigned short *)tr->buffer;
ncycles = tr->nbytes >> 1;
while (ncycles> 0) {
#include <stdio.h>
#include <stdlib.h>
+#include "yportenv.h"
+
static int nandsim_debug = 0;
#define debug(n, fmt, ...) \
static void idle(struct nandsim_private *ns, int line)
{
+ YAFFS_UNUSED(line);
+
ns->read_offset = -1;
ns->write_offset = -1;
ns->addr_offset = -1;
int nbytes, int line)
{
int from;
+
switch (nbytes) {
case 2:
case 5: /* contains an offset */
static void load_read_buffer(struct nandsim_private *ns)
{
int addr = get_page_address(ns);
+
debug(1, "Store read at address %d\n", addr);
ns->store->retrieve(ns->store, addr,ns->buffer);
}
static void save_write_buffer(struct nandsim_private *ns)
{
int addr = get_page_address(ns);
+
debug(1, "Store write at address %d\n", addr);
ns->store->store(ns->store, addr, ns->buffer);
}
static void check_read_buffer(struct nandsim_private *ns, int line)
{
+ YAFFS_UNUSED(ns);
+ YAFFS_UNUSED(line);
}
static void end_cmd(struct nandsim_private *ns, int line)
{
+ YAFFS_UNUSED(line);
ns->last_cmd_byte = 0xff;
}
static void set_busy(struct nandsim_private *ns, int cycles, int line)
{
+ YAFFS_UNUSED(line);
+
ns->busy_count = cycles;
}
static void read_id(struct nandsim_private *ns)
{
+ YAFFS_UNUSED(ns);
}
static void unsupported(struct nandsim_private *ns)
{
+ YAFFS_UNUSED(ns);
}
static void nandsim_cl_write(struct nandsim_private *ns, unsigned char val)
check_not_busy(ns, __LINE__);
if(ns->addr_expected < 1 ||
ns->addr_offset < 0 ||
- ns->addr_offset >= sizeof(ns->addr_buffer)){
+ ns->addr_offset >= (int)sizeof(ns->addr_buffer)){
debug(1, "Address write when not expected\n");
} else {
debug(1, "Address write when expecting %d bytes\n",
}
}
-static void nandsim_dl_write(struct nandsim_private *ns,
+static void nandsim_dl_write(struct nandsim_private *ns,
unsigned val,
int bus_width_shift)
{
include ../FrameworkRules.mk
-C_FLAGS += -DCONFIG_YAFFS_USE_PTHREADS
+CFLAGS += -DCONFIG_YAFFS_USE_PTHREADS -Werror
yaffs_test: $(FRAMEWORK_SOURCES) $(YAFFS_TEST_OBJS)
gcc $(CFLAGS) -o $@ $(YAFFS_TEST_OBJS) -lpthread
yaffs_lstat(str,&s);
- printf("%s inode %ld %d obj %x length %d mode %X ",str, de->d_ino, s.st_ino,de->d_dont_use,(int)s.st_size,s.st_mode);\
+ printf("%s inode %ld ino %d obj %p length %d mode %X ",
+ str, de->d_ino, s.st_ino,
+ de->d_dont_use, (int)s.st_size, s.st_mode);
+
if(de->d_ino != s.st_ino){
printf(" \n\n!!!! HEY inode mismatch\n\n");
error_line = __LINE__;
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include <unistd.h>
+#include <unistd.h>
-typedef struct
+typedef struct
{
u8 data[528]; // Data + spare
} yflash_Page;
typedef struct
{
yflash_Page page[32]; // The pages in the block
-
+
} yflash_Block;
static int CheckInit(struct yaffs_dev *dev)
{
static int initialised = 0;
-
int i;
-
-
int fSize;
int written;
-
yflash_Page p;
-
- if(initialised)
+
+ YAFFS_UNUSED(dev);
+
+ if(initialised)
{
return YAFFS_OK;
}
initialised = 1;
-
-
+
+
filedisk.nBlocks = (SIZE_IN_MB * 1024 * 1024)/(16 * 1024);
-
+
filedisk.handle = open("emfile-512-0", O_RDWR | O_CREAT, S_IREAD | S_IWRITE);
-
+
if(filedisk.handle < 0)
{
perror("Failed to open yaffs emulation file");
return YAFFS_FAIL;
}
-
-
+
+
fSize = lseek(filedisk.handle,0,SEEK_END);
-
+
if(fSize < SIZE_IN_MB * 1024 * 1024)
{
printf("Creating yaffs emulation file\n");
-
+
lseek(filedisk.handle,0,SEEK_SET);
-
+
memset(&p,0xff,sizeof(yflash_Page));
-
+
for(i = 0; i < SIZE_IN_MB * 1024 * 1024; i+= 512)
{
written = write(filedisk.handle,&p,sizeof(yflash_Page));
-
+
if(written != sizeof(yflash_Page))
{
printf("Write failed\n");
return YAFFS_FAIL;
}
- }
+ }
}
-
+
return 1;
}
int written;
CheckInit(dev);
-
-
-
+
if(data)
{
lseek(filedisk.handle,nand_chunk * 528,SEEK_SET);
written = write(filedisk.handle,data,512);
-
+
if(written != 512) return YAFFS_FAIL;
}
-
+
if(spare)
{
lseek(filedisk.handle,nand_chunk * 528 + 512,SEEK_SET);
written = write(filedisk.handle,spare,16);
-
+
if(written != 16) return YAFFS_FAIL;
}
-
-
- return YAFFS_OK;
+ return YAFFS_OK;
}
int nread;
CheckInit(dev);
-
-
-
+
if(data)
{
lseek(filedisk.handle,nand_chunk * 528,SEEK_SET);
nread = read(filedisk.handle,data,512);
-
+
if(nread != 512) return YAFFS_FAIL;
}
-
+
if(spare)
{
lseek(filedisk.handle,nand_chunk * 528 + 512,SEEK_SET);
nread= read(filedisk.handle,spare,16);
-
+
if(nread != 16) return YAFFS_FAIL;
}
-
-
- return YAFFS_OK;
+ return YAFFS_OK;
}
{
int i;
-
+
CheckInit(dev);
-
+
if(blockNumber < 0 || blockNumber >= filedisk.nBlocks)
{
yaffs_trace(YAFFS_TRACE_ALWAYS,
"Attempt to erase non-existant block %d\n",
blockNumber);
return YAFFS_FAIL;
- }
- else
- {
-
+ } else {
yflash_Page pg;
-
+
memset(&pg,0xff,sizeof(yflash_Page));
-
+
lseek(filedisk.handle, blockNumber * 32 * 528, SEEK_SET);
-
- for(i = 0; i < 32; i++)
- {
+
+ for(i = 0; i < 32; i++) {
write(filedisk.handle,&pg,528);
}
return YAFFS_OK;
}
-
}
int yflash_InitialiseNAND(struct yaffs_dev *dev)
{
-
+ YAFFS_UNUSED(dev);
return YAFFS_OK;
}
}
}
-
-
-
-
-static u8 localBuffer[PAGE_SIZE];
-
static char *NToName(char *buf,int n)
{
sprintf(buf,"emfile-2k-%d",n);
{
static int initialised = 0;
int i;
-
int blk;
-
if(initialised)
- {
return YAFFS_OK;
- }
initialised = 1;
-
srand(random_seed);
remaining_ops = (rand() % 1000) * 5;
memset(dummyBuffer,0xff,sizeof(dummyBuffer));
-
-
filedisk.nBlocks = SIZE_IN_MB * BLOCKS_PER_MB;
for(i = 0; i < MAX_HANDLES; i++)
for(i = 0,blk = 0; blk < filedisk.nBlocks; blk+=BLOCKS_PER_HANDLE,i++)
filedisk.handle[i] = GetBlockFileHandle(i);
-
return 1;
}
static int yflash2_EraseBlock(struct yaffs_dev *dev, int block_no)
{
- int i;
+ u32 i;
int h;
CheckInit();
u8 pg[PAGE_SIZE];
int syz = PAGE_SIZE;
- int pos;
memset(pg,0xff,syz);
-
h = filedisk.handle[(block_no / ( BLOCKS_PER_HANDLE))];
- lseek(h,((block_no % BLOCKS_PER_HANDLE) * dev->param.chunks_per_block) * PAGE_SIZE,SEEK_SET);
+ lseek(h,((block_no % BLOCKS_PER_HANDLE) * dev->param.chunks_per_block) * PAGE_SIZE, SEEK_SET);
for(i = 0; i < dev->param.chunks_per_block; i++)
- {
write(h,pg,PAGE_SIZE);
- }
- pos = lseek(h, 0,SEEK_CUR);
return YAFFS_OK;
}
static int yflash2_CheckBad(struct yaffs_dev *dev, int block_no)
{
- (void) dev;
- (void) block_no;
+ YAFFS_UNUSED(dev);
+ YAFFS_UNUSED(block_no);
return YAFFS_OK;
}
u32 *dataAddr = Chunk2DataAddr(dev,nand_chunk);
u32 *spareAddr = Chunk2SpareAddr(dev,nand_chunk);
+ YAFFS_UNUSED(data_len);
+
if(data)
{
m18_drv_FlashRead32(dataAddr,(u32 *)data,dev->param.total_bytes_per_chunk / 4);
static int m18_drv_InitialiseNAND(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
m18_drv_FlashInit();
/* Go through the blocks formatting them if they are not formatted */
/* Set up and execute transfer */
- tr[0].buffer = data;
+ tr[0].buffer = (u8 *)data;
tr[0].offset = 0;
tr[0].nbytes = data_len;
struct nand_chip *chip = dev_to_chip(dev);
u8 *buffer = dev_to_buffer(dev);
int nand_chunk = block_no * chip->pages_per_block;
- int ret;
-
struct nanddrv_transfer tr[1];
memset(buffer, 0, chip->spare_bytes_per_page);
tr[0].offset = chip->data_bytes_per_page;
tr[0].nbytes = chip->spare_bytes_per_page;
- ret = nanddrv_read_tr(chip, nand_chunk, tr, 1);
+ nanddrv_read_tr(chip, nand_chunk, tr, 1);
/* Check that bad block marker is not set */
if(yaffs_hweight8(buffer[0]) + yaffs_hweight8(buffer[1]) < 14)
#include "yaffs_nandsim_file.h"
-
+#include "yaffs_nand_drv.h"
#include "nandsim_file.h"
#include "nand_chip.h"
#include "yaffs_guts.h"
* Note: Only YAFFS headers are LGPL, YAFFS C code is covered by GPL.
*/
-#ifndef __YAFFS_NAND_DRV_H__
-#define __YAFFS_NAND_DRV_H__
+#ifndef __YAFFS_NAND_SIM_FILE_DRV_H__
+#define __YAFFS_NAND_SIM_FILE_DRV_H__
struct yaffs_dev;
-struct yaffs_dev *yaffs_nandsim_file_install(const char *dev_name,
+struct yaffs_dev *yaffs_nandsim_install_drv(const char *dev_name,
const char *backing_file_name,
int n_blocks);
u32 *dataAddr = Chunk2DataAddr(dev,nand_chunk);
u32 *spareAddr = Chunk2SpareAddr(dev,nand_chunk);
+ YAFFS_UNUSED(data_len);
+
if (data) {
nor_drv_FlashRead32(dataAddr,(u32 *)data,dev->param.total_bytes_per_chunk / sizeof(u32));
}
static int nor_drv_InitialiseNAND(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
nor_drv_FlashInit();
/* Go through the blocks formatting them if they are not formatted */
*/
int yaffsfs_CheckMemRegion(const void *addr, size_t size, int write_request)
{
+ YAFFS_UNUSED(size);
+ YAFFS_UNUSED(write_request);
+
if(!addr)
return -1;
return 0;
int result;
int next_urgent;
+ YAFFS_UNUSED(dummy);
+
/* Sleep for a bit to allow start up */
sleep(2);
-typedef struct
+typedef struct
{
u8 data[528]; // Data + spare
} yramdisk_page;
typedef struct
{
yramdisk_page page[32]; // The pages in the block
-
+
} yramdisk_block;
static int CheckInit(struct yaffs_dev *dev)
{
static int initialised = 0;
-
+
int i;
int fail = 0;
- //int nBlocks;
int nAllocated = 0;
-
- if(initialised)
+
+ if(initialised)
{
return YAFFS_OK;
}
initialised = 1;
-
-
+
+
ramdisk.nBlocks = (SIZE_IN_MB * 1024 * 1024)/(16 * 1024);
-
+
ramdisk.block = malloc(sizeof(yramdisk_block *) * ramdisk.nBlocks);
-
+
if(!ramdisk.block) return 0;
-
+
for(i=0; i <ramdisk.nBlocks; i++)
{
ramdisk.block[i] = NULL;
}
-
+
for(i=0; i <ramdisk.nBlocks && !fail; i++)
{
if((ramdisk.block[i] = malloc(sizeof(yramdisk_block))) == 0)
nAllocated++;
}
}
-
+
if(fail)
{
for(i = 0; i < nAllocated; i++)
kfree(ramdisk.block[i]);
}
kfree(ramdisk.block);
-
+
yaffs_trace(YAFFS_TRACE_ALWAYS,
"Allocation failed, could only allocate %dMB of %dMB requested.\n",
nAllocated/64,ramdisk.nBlocks * 528);
return 0;
}
-
-
+
+
return 1;
}
{
int blk;
int pg;
-
+
CheckInit(dev);
-
+
blk = nand_chunk/32;
pg = nand_chunk%32;
-
-
+
+
if(data)
{
memcpy(ramdisk.block[blk]->page[pg].data,data,512);
}
-
-
+
+
if(tags)
{
struct yaffs_packed_tags1 pt;
-
+
yaffs_pack_tags1(&pt,tags);
memcpy(&ramdisk.block[blk]->page[pg].data[512],&pt,sizeof(pt));
}
- return YAFFS_OK;
+ return YAFFS_OK;
}
int blk;
int pg;
-
+
CheckInit(dev);
-
+
blk = nand_chunk/32;
pg = nand_chunk%32;
-
-
+
+
if(data)
{
memcpy(data,ramdisk.block[blk]->page[pg].data,512);
}
-
-
+
+
if(tags)
{
struct yaffs_packed_tags1 pt;
-
+
memcpy(&pt,&ramdisk.block[blk]->page[pg].data[512],sizeof(pt));
yaffs_unpack_tags1(tags,&pt);
-
+
}
return YAFFS_OK;
int pg;
int i;
-
+
CheckInit(dev);
-
+
blk = nand_chunk/32;
pg = nand_chunk%32;
-
-
+
+
for(i = 0; i < 528; i++)
{
if(ramdisk.block[blk]->page[pg].data[i] != 0xFF)
int yramdisk_erase(struct yaffs_dev *dev, int blockNumber)
{
-
+
CheckInit(dev);
-
+
if(blockNumber < 0 || blockNumber >= ramdisk.nBlocks)
{
yaffs_trace(YAFFS_TRACE_ALWAYS,
memset(ramdisk.block[blockNumber],0xFF,sizeof(yramdisk_block));
return YAFFS_OK;
}
-
+
}
int yramdisk_initialise(struct yaffs_dev *dev)
{
- //dev->use_nand_ecc = 1; // force on use_nand_ecc which gets faked.
- // This saves us doing ECC checks.
-
+ YAFFS_UNUSED(dev);
+
return YAFFS_OK;
}
#define BLOCKS_PER_MEG ((1<<20)/(PAGES_PER_BLOCK * PAGE_DATA_SIZE))
-typedef struct
+typedef struct
{
u8 data[PAGE_TOTAL_SIZE]; // Data + spare
int empty; // is this empty?
typedef struct
{
nandemul_Page *page[PAGES_PER_BLOCK];
- int damaged;
+ int damaged;
} nandemul_Block;
{
#ifdef __KERNEL__
if(n > 0) schedule_timeout(n);
+#else
+ YAFFS_UNUSED(n);
#endif
}
static void nandemul_ReallyEraseBlock(int blockNumber)
{
int i;
-
+
nandemul_Block *blk;
-
+
if(blockNumber < 0 || blockNumber >= ned.nBlocks)
{
return;
}
-
+
blk = ned.block[blockNumber];
-
+
for(i = 0; i < PAGES_PER_BLOCK; i++)
{
memset(blk->page[i],0xff,sizeof(nandemul_Page));
static int CheckInit(void)
{
static int initialised = 0;
-
+
int i,j;
-
+
int fail = 0;
- int nBlocks;
+ int nBlocks;
int nAllocated = 0;
-
- if(initialised)
+
+ if(initialised)
{
return YAFFS_OK;
}
-
-
+
+
ned.nBlocks = nBlocks = nandemul2k_CalcNBlocks();
-
+
ned.block = malloc(sizeof(nandemul_Block*) * nBlocks );
-
+
if(!ned.block) return YAFFS_FAIL;
-
-
-
-
+
+
+
+
for(i=fail=0; i <nBlocks; i++)
{
-
+
nandemul_Block *blk;
-
+
if(!(blk = ned.block[i] = malloc(sizeof(nandemul_Block))))
{
fail = 1;
- }
+ }
else
{
for(j = 0; j < PAGES_PER_BLOCK; j++)
nAllocated++;
}
}
-
+
if(fail)
{
//Todo thump pages
-
+
for(i = 0; i < nAllocated; i++)
{
kfree(ned.block[i]);
}
kfree(ned.block);
-
+
yaffs_trace(YAFFS_TRACE_ALWAYS,
"Allocation failed, could only allocate %dMB of %dMB requested.\n",
nAllocated/64,sizeInMB);
return 0;
}
-
+
ned.nBlocks = nBlocks;
-
+
initialised = 1;
-
+
return 1;
}
int blk;
int pg;
int i;
-
+
u8 *x;
-
+
blk = nand_chunk/PAGES_PER_BLOCK;
pg = nand_chunk%PAGES_PER_BLOCK;
-
-
+
+
if(data)
{
x = ned.block[blk]->page[pg]->data;
-
+
for(i = 0; i < PAGE_DATA_SIZE; i++)
{
x[i] &=data[i];
ned.block[blk]->page[pg]->empty = 0;
}
-
-
+
+
if(tags)
{
x = &ned.block[blk]->page[pg]->data[PAGE_DATA_SIZE];
-
+
yaffs_pack_tags2((struct yaffs_packed_tags2 *)x,tags, !dev->param.no_tags_ecc);
-
+
}
-
+
if(tags || data)
{
nandemul_yield(1);
{
int blk;
int pg;
-
+
u8 *x;
-
-
+
+
blk = nand_chunk/PAGES_PER_BLOCK;
pg = nand_chunk%PAGES_PER_BLOCK;
-
-
+
+
if(data)
{
memcpy(data,ned.block[blk]->page[pg]->data,PAGE_DATA_SIZE);
}
-
-
+
+
if(tags)
{
x = &ned.block[blk]->page[pg]->data[PAGE_DATA_SIZE];
-
- yaffs_unpack_tags2(tags,(struct yaffs_packed_tags2 *)x, !dev->param.no_tags_ecc);
- }
-
- return YAFFS_OK;
-}
-
-
-static int nandemul2k_CheckChunkErased(struct yaffs_dev *dev,int nand_chunk)
-{
- int blk;
- int pg;
- int i;
-
-
- blk = nand_chunk/PAGES_PER_BLOCK;
- pg = nand_chunk%PAGES_PER_BLOCK;
-
-
- for(i = 0; i < PAGE_TOTAL_SIZE; i++)
- {
- if(ned.block[blk]->page[pg]->data[i] != 0xFF)
- {
- return YAFFS_FAIL;
- }
+ yaffs_unpack_tags2(tags,(struct yaffs_packed_tags2 *)x, !dev->param.no_tags_ecc);
}
return YAFFS_OK;
-
}
int nandemul2k_EraseBlockInNAND(struct yaffs_dev *dev, int blockNumber)
{
-
-
+ YAFFS_UNUSED(dev);
+
if(blockNumber < 0 || blockNumber >= ned.nBlocks)
{
yaffs_trace(YAFFS_TRACE_ALWAYS,
{
nandemul_ReallyEraseBlock(blockNumber);
}
-
+
return YAFFS_OK;
}
int nandemul2k_InitialiseNAND(struct yaffs_dev *dev)
{
+ YAFFS_UNUSED(dev);
+
CheckInit();
return YAFFS_OK;
}
-
+
int nandemul2k_MarkNANDBlockBad(struct yaffs_dev *dev, int block_no)
{
-
u8 *x;
-
+
+ YAFFS_UNUSED(dev);
+
x = &ned.block[block_no]->page[0]->data[PAGE_DATA_SIZE];
-
+
memset(x,0,sizeof(struct yaffs_packed_tags2));
-
-
+
+
return YAFFS_OK;
-
+
}
int nandemul2k_QueryNANDBlock(struct yaffs_dev *dev, int block_no, enum yaffs_block_state *state, u32 *seq_number)
struct yaffs_ext_tags tags;
int chunkNo;
+ YAFFS_UNUSED(dev);
+
*seq_number = 0;
-
+
chunkNo = block_no * dev->param.chunks_per_block;
-
+
nandemul2k_ReadChunkWithTagsFromNAND(dev,chunkNo,NULL,&tags);
if(tags.block_bad)
{
return YAFFS_OK;
}
-int nandemul2k_GetBytesPerChunk(void) { return PAGE_DATA_SIZE;}
+int nandemul2k_GetBytesPerChunk(void)
+{
+ return PAGE_DATA_SIZE;
+}
+
+int nandemul2k_GetChunksPerBlock(void)
+{
+ return PAGES_PER_BLOCK;
+}
-int nandemul2k_GetChunksPerBlock(void) { return PAGES_PER_BLOCK; }
-int nandemul2k_GetNumberOfBlocks(void) {return nandemul2k_CalcNBlocks();}
+int nandemul2k_GetNumberOfBlocks(void)
+{
+	return nandemul2k_CalcNBlocks();
+}
#endif //YAFFS_RAM_ENABLED
#include "yaffs_nandemul2k.h"
#include "yaffs_trace.h"
#include "yaffs_osglue.h"
+#include "yaffs_nandsim_file.h"
#include <errno.h>
#endif
}
+#if 0
+
static Y_LOFF_T yaffs_get_file_size(struct yaffs_obj *obj)
{
YCHAR *alias = NULL;
}
}
+#endif
r = min((long)(pd - pc), (long)(pn - pd - es));
vecswap(pb, pn - r, r);
r = pb - pa;
- if (r > es)
+ if (r > (int)es)
yaffs_qsort(a, r / es, es, cmp);
r = pd - pc;
- if (r > es) {
+ if (r > (int)es) {
/* Iterate rather than recurse to save stack space */
a = pn - r;
n = r / es;
struct yaffs_obj *reldir, const YCHAR *dirname);
static int yaffsfs_closedir_no_lock(yaffs_DIR *dirent);
-unsigned int yaffs_wr_attempts;
+int yaffs_wr_attempts;
/*
* Handle management.
static int yaffsfs_TooManyObjects(struct yaffs_dev *dev)
{
- int current_objects = dev->n_obj - dev->n_deleted_files;
+ u32 current_objects = dev->n_obj - dev->n_deleted_files;
if (dev->param.max_objects && current_objects > dev->param.max_objects)
return 1;
Y_LOFF_T startPos = 0;
Y_LOFF_T endPos = 0;
int nRead = 0;
- int nToRead = 0;
+ unsigned int nToRead = 0;
int totalRead = 0;
Y_LOFF_T maxRead;
u8 *buf = (u8 *) vbuf;
/* Not a reading handle */
yaffsfs_SetError(-EINVAL);
totalRead = -1;
- } else if (nbyte > YAFFS_MAX_FILE_SIZE) {
- yaffsfs_SetError(-EINVAL);
- totalRead = -1;
} else {
if (isPread)
startPos = offset;
endPos = pos + nbyte;
if (pos < 0 || pos > YAFFS_MAX_FILE_SIZE ||
- nbyte > YAFFS_MAX_FILE_SIZE ||
endPos < 0 || endPos > YAFFS_MAX_FILE_SIZE) {
totalRead = -1;
nbyte = 0;
buf += nRead;
}
- if (nRead == nToRead)
+ if (nRead == (int)nToRead)
nbyte -= nRead;
else
nbyte = 0; /* no more to read */
int nWritten = 0;
int totalWritten = 0;
int write_trhrough = 0;
- int nToWrite = 0;
+ unsigned int nToWrite = 0;
const u8 *buf = (const u8 *)vbuf;
if (yaffsfs_CheckMemRegion(vbuf, nbyte, 0) < 0) {
endPos = pos + nbyte;
if (pos < 0 || pos > YAFFS_MAX_FILE_SIZE ||
- nbyte > YAFFS_MAX_FILE_SIZE ||
endPos < 0 || endPos > YAFFS_MAX_FILE_SIZE) {
totalWritten = -1;
nbyte = 0;
buf += nWritten;
}
- if (nWritten == nToWrite)
+ if (nWritten == (int)nToWrite)
nbyte -= nToWrite;
else
nbyte = 0;
obj = yaffs_get_equivalent_obj(obj);
if (obj && buf) {
- buf->st_dev = (int)obj->my_dev->os_context;
+ buf->st_dev = 0;
buf->st_ino = obj->obj_id;
buf->st_mode = obj->yst_mode & ~S_IFMT;
if (dsc->nextReturn) {
dsc->de.d_ino =
yaffs_get_equivalent_obj(dsc->nextReturn)->obj_id;
- dsc->de.d_dont_use = (unsigned)dsc->nextReturn;
+ dsc->de.d_dont_use = dsc->nextReturn;
dsc->de.d_off = dsc->offset++;
yaffs_get_obj_name(dsc->nextReturn,
dsc->de.d_name, NAME_MAX);
unsigned short d_reclen; /* length of this dirent */
YUCHAR d_type; /* type of this record */
YCHAR d_name[NAME_MAX+1]; /* file name (null-terminated) */
- unsigned d_dont_use; /* debug: not for public consumption */
+ void *d_dont_use; /* debug pointer used by test harness */
};
typedef struct opaque_structure yaffs_DIR;
struct yaffs_stat {
- int st_dev; /* device */
+ int st_dev; /* device - unused*/
int st_ino; /* inode */
unsigned st_mode; /* protection */
int st_nlink; /* number of hard links */
#endif
+#define YAFFS_UNUSED(x) (void)(x)
+
#ifdef CONFIG_YAFFS_PROVIDE_DEFS
/* File types */
* Chunk bitmap manipulations
*/
-static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, int blk)
+static inline u8 *yaffs_block_bits(struct yaffs_dev *dev, u32 blk)
{
if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
yaffs_trace(YAFFS_TRACE_ERROR,
(dev->chunk_bit_stride * (blk - dev->internal_start_block));
}
-void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk)
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, u32 blk, u32 chunk)
{
- if (blk < dev->internal_start_block || blk > dev->internal_end_block ||
- chunk < 0 || chunk >= dev->param.chunks_per_block) {
+ if (blk < dev->internal_start_block ||
+ blk > dev->internal_end_block ||
+ chunk >= dev->param.chunks_per_block) {
yaffs_trace(YAFFS_TRACE_ERROR,
"Chunk Id (%d:%d) invalid",
blk, chunk);
}
}
-void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk)
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, u32 blk)
{
u8 *blk_bits = yaffs_block_bits(dev, blk);
memset(blk_bits, 0, dev->chunk_bit_stride);
}
-void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, u32 blk, u32 chunk)
{
u8 *blk_bits = yaffs_block_bits(dev, blk);
blk_bits[chunk / 8] &= ~(1 << (chunk & 7));
}
-void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, u32 blk, u32 chunk)
{
u8 *blk_bits = yaffs_block_bits(dev, blk);
blk_bits[chunk / 8] |= (1 << (chunk & 7));
}
-int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk)
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, u32 blk, u32 chunk)
{
u8 *blk_bits = yaffs_block_bits(dev, blk);
return (blk_bits[chunk / 8] & (1 << (chunk & 7))) ? 1 : 0;
}
-int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk)
+int yaffs_still_some_chunks(struct yaffs_dev *dev, u32 blk)
{
u8 *blk_bits = yaffs_block_bits(dev, blk);
- int i;
+ u32 i;
for (i = 0; i < dev->chunk_bit_stride; i++) {
if (*blk_bits)
return 0;
}
-int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk)
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, u32 blk)
{
u8 *blk_bits = yaffs_block_bits(dev, blk);
- int i;
+ u32 i;
int n = 0;
for (i = 0; i < dev->chunk_bit_stride; i++, blk_bits++)
#include "yaffs_guts.h"
-void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, int blk, int chunk);
-void yaffs_clear_chunk_bits(struct yaffs_dev *dev, int blk);
-void yaffs_clear_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
-void yaffs_set_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
-int yaffs_check_chunk_bit(struct yaffs_dev *dev, int blk, int chunk);
-int yaffs_still_some_chunks(struct yaffs_dev *dev, int blk);
-int yaffs_count_chunk_bits(struct yaffs_dev *dev, int blk);
+void yaffs_verify_chunk_bit_id(struct yaffs_dev *dev, u32 blk, u32 chunk);
+void yaffs_clear_chunk_bits(struct yaffs_dev *dev, u32 blk);
+void yaffs_clear_chunk_bit(struct yaffs_dev *dev, u32 blk, u32 chunk);
+void yaffs_set_chunk_bit(struct yaffs_dev *dev, u32 blk, u32 chunk);
+int yaffs_check_chunk_bit(struct yaffs_dev *dev, u32 blk, u32 chunk);
+int yaffs_still_some_chunks(struct yaffs_dev *dev, u32 blk);
+int yaffs_count_chunk_bits(struct yaffs_dev *dev, u32 blk);
#endif
struct yaffs_checkpt_chunk_hdr {
int version;
- int seq;
+ u32 seq;
u32 sum;
u32 xor;
} ;
static int yaffs_checkpt_erase(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
if (!dev->drv.drv_erase_fn)
return 0;
static void yaffs2_checkpt_find_erased_block(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
int blocks_avail = dev->n_erased_blocks - dev->param.n_reserved_blocks;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
dev->n_erased_blocks, dev->param.n_reserved_blocks,
blocks_avail, dev->checkpt_next_block);
- if (dev->checkpt_next_block >= 0 &&
+ if (dev->checkpt_next_block >= dev->internal_start_block &&
dev->checkpt_next_block <= dev->internal_end_block &&
blocks_avail > 0) {
}
yaffs_trace(YAFFS_TRACE_CHECKPOINT, "out of checkpt blocks");
- dev->checkpt_next_block = -1;
- dev->checkpt_cur_block = -1;
+ dev->checkpt_next_block = 0;
+ dev->checkpt_cur_block = 0;
}
static void yaffs2_checkpt_find_block(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
struct yaffs_ext_tags tags;
yaffs_trace(YAFFS_TRACE_CHECKPOINT,
if (dev->blocks_in_checkpt < dev->checkpt_max_blocks)
for (i = dev->checkpt_next_block; i <= dev->internal_end_block;
i++) {
- int chunk = i * dev->param.chunks_per_block;
+ u32 chunk = i * dev->param.chunks_per_block;
enum yaffs_block_state state;
u32 seq;
yaffs_trace(YAFFS_TRACE_CHECKPOINT, "found no more checkpt blocks");
- dev->checkpt_next_block = -1;
- dev->checkpt_cur_block = -1;
+ dev->checkpt_next_block = 0;
+ dev->checkpt_cur_block = 0;
}
int yaffs2_checkpt_open(struct yaffs_dev *dev, int writing)
dev->checkpt_byte_count = 0;
dev->checkpt_sum = 0;
dev->checkpt_xor = 0;
- dev->checkpt_cur_block = -1;
- dev->checkpt_cur_chunk = -1;
+ dev->checkpt_cur_block = 0;
+ dev->checkpt_cur_chunk = 0;
dev->checkpt_next_block = dev->internal_start_block;
if (writing) {
int offset_chunk;
struct yaffs_ext_tags tags;
- if (dev->checkpt_cur_block < 0) {
+ if (dev->checkpt_cur_block < dev->internal_start_block) {
yaffs2_checkpt_find_erased_block(dev);
dev->checkpt_cur_chunk = 0;
}
- if (dev->checkpt_cur_block < 0)
+ if (dev->checkpt_cur_block < dev->internal_start_block)
return 0;
tags.is_deleted = 0;
dev->checkpt_cur_chunk++;
if (dev->checkpt_cur_chunk >= dev->param.chunks_per_block) {
dev->checkpt_cur_chunk = 0;
- dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_block = 0;
}
memset(dev->checkpt_buffer, 0, dev->data_bytes_per_chunk);
data_bytes++;
dev->checkpt_byte_count++;
- if (dev->checkpt_byte_offs < 0 ||
- dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
+ if (dev->checkpt_byte_offs >= dev->data_bytes_per_chunk)
ok = yaffs2_checkpt_flush_buffer(dev);
}
while (i < n_bytes && ok) {
- if (dev->checkpt_byte_offs < 0 ||
- dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
+ if (dev->checkpt_byte_offs >= dev->data_bytes_per_chunk) {
- if (dev->checkpt_cur_block < 0) {
+ if (dev->checkpt_cur_block <
+ dev->internal_start_block) {
yaffs2_checkpt_find_block(dev);
dev->checkpt_cur_chunk = 0;
}
- if (dev->checkpt_cur_block < 0) {
+ if (dev->checkpt_cur_block <
+ dev->internal_start_block) {
ok = 0;
break;
}
dev->checkpt_page_seq++;
dev->checkpt_cur_chunk++;
if (dev->checkpt_cur_chunk >=
dev->param.chunks_per_block)
- dev->checkpt_cur_block = -1;
+ dev->checkpt_cur_block = 0;
}
} else if (dev->checkpt_block_list) {
for (i = 0;
i < dev->blocks_in_checkpt &&
- dev->checkpt_block_list[i] >= 0; i++) {
- int blk = dev->checkpt_block_list[i];
+ dev->checkpt_block_list[i] > 0; i++) {
+ u32 blk = dev->checkpt_block_list[i];
struct yaffs_block_info *bi = NULL;
if (dev->internal_start_block <= blk &&
/* Function to manipulate block info */
static inline struct yaffs_block_info *yaffs_get_block_info(struct yaffs_dev
- *dev, int blk)
+ *dev, u32 blk)
{
if (blk < dev->internal_start_block || blk > dev->internal_end_block) {
yaffs_trace(YAFFS_TRACE_ERROR,
- "**>> yaffs: get_block_info block %d is not valid",
+ "**>> yaffs: get_block_info block %u is not valid",
blk);
BUG();
}
#define YAFFS_GC_GOOD_ENOUGH 2
#define YAFFS_GC_PASSIVE_THRESHOLD 4
+#define YAFFS_MAX_CACHE_USAGE 100000000
+
#include "yaffs_ecc.h"
/* Forward declarations */
static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
- const u8 *buffer, int n_bytes, int use_reserve);
+ const u8 *buffer, u32 n_bytes, int use_reserve);
static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name,
int buffer_size);
/* Function to calculate chunk and offset */
void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
- int *chunk_out, u32 *offset_out)
+ u32 *chunk_out, u32 *offset_out)
{
- int chunk;
+ u32 chunk;
u32 offset;
chunk = (u32) (addr >> dev->chunk_shift);
* be hellishly efficient.
*/
-static inline u32 calc_shifts_ceiling(u32 x)
+static inline u32 yaffs_calc_shifts_ceiling(u32 x)
{
int extra_bits;
int shifts;
}
}
-static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
+static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, u32 nand_chunk,
int erased_ok)
{
- int flash_block = nand_chunk / dev->param.chunks_per_block;
+ u32 flash_block = nand_chunk / dev->param.chunks_per_block;
struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);
yaffs_handle_chunk_error(dev, bi, YAFFS_ECC_RESULT_FIXED);
int retval = YAFFS_OK;
u8 *data = yaffs_get_temp_buffer(dev);
struct yaffs_ext_tags tags;
- int result;
- result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
+ yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags);
if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR)
retval = YAFFS_FAIL;
int retval = YAFFS_OK;
struct yaffs_ext_tags temp_tags;
u8 *buffer = yaffs_get_temp_buffer(dev);
- int result;
- result = yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
+ yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags);
if (memcmp(buffer, data, dev->data_bytes_per_chunk) ||
temp_tags.obj_id != tags->obj_id ||
temp_tags.chunk_id != tags->chunk_id ||
static int yaffs_find_alloc_block(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
struct yaffs_block_info *bi;
if (dev->n_erased_blocks < 1) {
return -1;
}
- if (dev->n_erased_blocks < dev->param.n_reserved_blocks
+ if (dev->n_erased_blocks < (int)dev->param.n_reserved_blocks
&& dev->alloc_page == 0)
yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve");
dev->n_free_chunks--;
/* If the block is full set the state to full */
- if (dev->alloc_page >= dev->param.chunks_per_block) {
+ if (dev->alloc_page >= (int)dev->param.chunks_per_block) {
bi->block_state = YAFFS_BLOCK_STATE_FULL;
dev->alloc_block = -1;
}
return tn;
}
-static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id,
- int chunk_obj)
+static int yaffs_tags_match(const struct yaffs_ext_tags *tags, u32 obj_id,
+ u32 chunk_obj)
{
return (tags->chunk_id == chunk_obj &&
tags->obj_id == obj_id &&
}
-static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk,
- struct yaffs_ext_tags *tags, int obj_id,
- int inode_chunk)
+static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, u32 the_chunk,
+ struct yaffs_ext_tags *tags, u32 obj_id,
+ u32 inode_chunk)
{
- int j;
+ u32 j;
for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) {
if (yaffs_check_chunk_bit
*/
static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
if (dev->param.n_caches > 0) {
for (i = 0; i < dev->param.n_caches; i++) {
static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
{
struct yaffs_cache *cache;
- int usage;
- int i;
+ u32 usage;
+ u32 i;
if (dev->param.n_caches < 1)
return NULL;
* Find the LRU cache and flush it if it is dirty.
*/
- usage = -1;
+ usage = YAFFS_MAX_CACHE_USAGE + 100; /* Silly high number */
cache = NULL;
for (i = 0; i < dev->param.n_caches; i++) {
int chunk_id)
{
struct yaffs_dev *dev = obj->my_dev;
- int i;
+ u32 i;
if (dev->param.n_caches < 1)
return NULL;
static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
int is_write)
{
- int i;
-
if (dev->param.n_caches < 1)
return;
- if (dev->cache_last_use < 0 ||
- dev->cache_last_use > 100000000) {
+ if (dev->cache_last_use > YAFFS_MAX_CACHE_USAGE) {
+ u32 i;
+
/* Reset the cache usages */
for (i = 1; i < dev->param.n_caches; i++)
dev->cache[i].last_use = 0;
*/
static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
{
- int i;
+ u32 i;
struct yaffs_dev *dev = in->my_dev;
if (dev->param.n_caches > 0) {
return YAFFS_FAIL;
}
-
-void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no)
+void yaffs_block_became_dirty(struct yaffs_dev *dev, u32 block_no)
{
struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no);
int erased_ok = 0;
- int i;
+ u32 i;
/* If the block is still healthy erase it and mark as clean.
* If the block has had a data failure, then retire it.
{
int old_chunk;
int ret_val = YAFFS_OK;
- int i;
+ u32 i;
int is_checkpt_block;
int max_copies;
int chunks_before = yaffs_get_erased_chunks(dev);
static unsigned yaffs_find_gc_block(struct yaffs_dev *dev,
int aggressive, int background)
{
- int i;
- int iterations;
+ u32 i;
+ u32 iterations;
unsigned selected = 0;
int prioritised = 0;
int prioritised_exist = 0;
struct yaffs_block_info *bi;
- int threshold;
+ u32 threshold;
/* First let's see if we need to grab a prioritised block */
if (dev->has_pending_prioritised_gc && !aggressive) {
*/
if (!selected) {
- int pages_used;
- int n_blocks =
+ u32 pages_used;
+ u32 n_blocks =
dev->internal_end_block - dev->internal_start_block + 1;
if (aggressive) {
threshold = dev->param.chunks_per_block;
iterations = n_blocks;
} else {
- int max_threshold;
+ u32 max_threshold;
if (background)
max_threshold = dev->param.chunks_per_block / 2;
gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive);
}
- if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) &&
+ if (dev->n_erased_blocks < (int)dev->param.n_reserved_blocks &&
dev->gc_block > 0) {
yaffs_trace(YAFFS_TRACE_GC,
"yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d",
dev->n_erased_blocks, max_tries,
dev->gc_block);
}
- } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) &&
+ } while ((dev->n_erased_blocks < (int)dev->param.n_reserved_blocks) &&
(dev->gc_block > 0) && (max_tries < 2));
return aggressive ? gc_ok : YAFFS_OK;
}
static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk,
- const u8 *buffer, int n_bytes, int use_reserve)
+ const u8 *buffer, u32 n_bytes, int use_reserve)
{
/* Find old chunk Need to do this to get serial number
* Write new one and patch into tree.
if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) {
yaffs_trace(YAFFS_TRACE_ERROR,
- "Writing %d bytes to chunk!!!!!!!!!",
+ "Writing %u bytes to chunk!!!!!!!!!",
n_bytes);
BUG();
}
struct yaffs_obj_hdr *oh;
struct yaffs_dev *dev;
struct yaffs_ext_tags tags;
- int result;
- int alloc_failed = 0;
if (!in || !in->lazy_loaded || in->hdr_chunk < 1)
return;
in->lazy_loaded = 0;
buf = yaffs_get_temp_buffer(dev);
- result = yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
+ yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags);
oh = (struct yaffs_obj_hdr *)buf;
in->yst_mode = oh->yst_mode;
if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) {
in->variant.symlink_variant.alias =
yaffs_clone_str(oh->alias);
- if (!in->variant.symlink_variant.alias)
- alloc_failed = 1; /* Not returned */
}
yaffs_release_temp_buffer(dev, buf);
}
struct yaffs_dev *dev = in->my_dev;
int prev_chunk_id;
int ret_val = 0;
- int result = 0;
int new_chunk_id;
struct yaffs_ext_tags new_tags;
struct yaffs_ext_tags old_tags;
prev_chunk_id = in->hdr_chunk;
if (prev_chunk_id > 0) {
- result = yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
+ yaffs_rd_chunk_tags_nand(dev, prev_chunk_id,
buffer, &old_tags);
yaffs_verify_oh(in, oh, &old_tags, 0);
* Curve-balls: the first chunk might also be the last chunk.
*/
-int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes)
+int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, u32 n_bytes)
{
- int chunk;
+ u32 chunk;
u32 start;
- int n_copy;
- int n = n_bytes;
- int n_done = 0;
+ u32 n_copy;
+ u32 n = n_bytes;
+ u32 n_done = 0;
struct yaffs_cache *cache;
struct yaffs_dev *dev;
buffer += n_copy;
n_done += n_copy;
}
- return n_done;
+ return (int)n_done;
}
int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
- int n_bytes, int write_through)
+ u32 n_bytes, int write_through)
{
- int chunk;
+ u32 chunk;
u32 start;
- int n_copy;
- int n = n_bytes;
- int n_done = 0;
- int n_writeback;
+ u32 n_copy;
+ u32 n = n_bytes;
+ u32 n_done = 0;
+ u32 n_writeback;
loff_t start_write = offset;
int chunk_written = 0;
u32 n_bytes_read;
dev->data_bytes_per_chunk + start != offset ||
start >= dev->data_bytes_per_chunk) {
yaffs_trace(YAFFS_TRACE_ERROR,
- "AddrToChunk of offset %lld gives chunk %d start %d",
- offset, chunk, start);
+ "addr_to_chunk() of offset %lld gives chunk %u start %u",
+ (long long int)offset, chunk, start);
}
chunk++; /* File pos to chunk in file offset */
n_bytes_read = dev->data_bytes_per_chunk;
n_writeback =
- (n_bytes_read >
- (start + n)) ? n_bytes_read : (start + n);
+ (n_bytes_read > (start + n)) ?
+ n_bytes_read : (start + n);
- if (n_writeback < 0 ||
- n_writeback > dev->data_bytes_per_chunk)
+ if (n_writeback > dev->data_bytes_per_chunk)
BUG();
} else {
}
int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
- int n_bytes, int write_through)
+ u32 n_bytes, int write_through)
{
yaffs2_handle_hole(in, offset);
return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through);
struct yaffs_dev *dev = in->my_dev;
loff_t old_size = in->variant.file_variant.file_size;
- int i;
+ u32 i;
int chunk_id;
u32 dummy;
- int last_del;
- int start_del;
+ u32 last_del;
+ u32 start_del;
if (old_size > 0)
yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy);
if (chunk_id < 1)
continue;
- if (chunk_id <
+ if ((u32)chunk_id <
(dev->internal_start_block * dev->param.chunks_per_block) ||
- chunk_id >=
+ (u32)chunk_id >=
((dev->internal_end_block + 1) *
dev->param.chunks_per_block)) {
yaffs_trace(YAFFS_TRACE_ALWAYS,
void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size)
{
- int new_full;
+ u32 new_full;
u32 new_partial;
struct yaffs_dev *dev = obj->my_dev;
/*----------------------- Initialisation Scanning ---------------------- */
-void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, u32 obj_id,
int backward_scanning)
{
struct yaffs_obj *obj;
} else if (obj->short_name[0]) {
strcpy(name, obj->short_name);
} else if (obj->hdr_chunk > 0) {
- int result;
u8 *buffer = yaffs_get_temp_buffer(obj->my_dev);
struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer;
memset(buffer, 0, obj->my_dev->data_bytes_per_chunk);
- if (obj->hdr_chunk > 0) {
- result = yaffs_rd_chunk_tags_nand(obj->my_dev,
+ if (obj->hdr_chunk > 0)
+ yaffs_rd_chunk_tags_nand(obj->my_dev,
obj->hdr_chunk,
buffer, NULL);
- }
+
yaffs_load_name_from_oh(obj->my_dev, name, oh->name,
buffer_size);
int yaffs_guts_format_dev(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
enum yaffs_block_state state;
u32 dummy;
{
int init_failed = 0;
unsigned x;
- int bits;
+ unsigned bits;
if(yaffs_guts_ll_init(dev) != YAFFS_OK)
return YAFFS_FAIL;
x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
- bits = calc_shifts_ceiling(x);
+ bits = yaffs_calc_shifts_ceiling(x);
/* Set up tnode width if wide tnodes are enabled. */
if (!dev->param.wide_tnodes_disabled) {
if (bits <= dev->tnode_width)
dev->chunk_grp_bits = 0;
else
- dev->chunk_grp_bits = (u16)(bits - dev->tnode_width);
+ dev->chunk_grp_bits = bits - dev->tnode_width;
dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
if (dev->tnode_size < sizeof(struct yaffs_tnode))
dev->gc_cleanup_list = NULL;
if (!init_failed && dev->param.n_caches > 0) {
- int i;
+ u32 i;
void *buf;
int cache_bytes =
dev->param.n_caches * sizeof(struct yaffs_cache);
void yaffs_deinitialise(struct yaffs_dev *dev)
{
if (dev->is_mounted) {
- int i;
+ u32 i;
yaffs_deinit_blocks(dev);
yaffs_deinit_tnodes_and_objs(dev);
int yaffs_count_free_chunks(struct yaffs_dev *dev)
{
- int n_free = 0;
- int b;
+ u32 n_free = 0;
+ u32 b;
struct yaffs_block_info *blk;
blk = dev->block_info;
}
blk++;
}
- return n_free;
+ return (int)n_free;
}
int yaffs_get_n_free_chunks(struct yaffs_dev *dev)
int n_free;
int n_dirty_caches;
int blocks_for_checkpt;
- int i;
+ u32 i;
n_free = dev->n_free_chunks;
n_free += dev->n_deleted_files;
void yaffs_count_blocks_by_state(struct yaffs_dev *dev, int bs[10])
{
- int i;
+ u32 i;
struct yaffs_block_info *bi;
int s;
struct yaffs_cache {
struct yaffs_obj *object;
int chunk_id;
- int last_use;
+ u32 last_use;
int dirty;
int n_bytes; /* Only valid if the cache is dirty */
int locked; /* Can't push out or flush while locked. */
struct yaffs_ext_tags {
unsigned chunk_used; /* Status of the chunk: used or unused */
- unsigned obj_id; /* If 0 this is not used */
+ u32 obj_id; /* If 0 this is not used */
unsigned chunk_id; /* If 0 this is a header, else a data chunk */
unsigned n_bytes; /* Only valid for data chunks */
enum yaffs_obj_type type;
/* Apply to everything */
- int parent_obj_id;
+ u32 parent_obj_id;
u16 sum_no_longer_used; /* checksum of name. No longer used */
YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
int inband_tags; /* Use inband tags */
u32 total_bytes_per_chunk; /* Should be >= 512, does not need to
be a power of 2 */
- int chunks_per_block; /* does not need to be a power of 2 */
- int spare_bytes_per_chunk; /* spare area size */
- int start_block; /* Start block we're allowed to use */
- int end_block; /* End block we're allowed to use */
- int n_reserved_blocks; /* Tuneable so that we can reduce
+ u32 chunks_per_block; /* does not need to be a power of 2 */
+ u32 spare_bytes_per_chunk; /* spare area size */
+ u32 start_block; /* Start block we're allowed to use */
+ u32 end_block; /* End block we're allowed to use */
+ u32 n_reserved_blocks; /* Tuneable so that we can reduce
* reserved blocks on NOR and RAM. */
- int n_caches; /* If <= 0, then short op caching is disabled,
+ u32 n_caches; /* If <= 0, then short op caching is disabled,
* else the number of short op caches.
*/
int cache_bypass_aligned; /* If non-zero then bypass the cache for
int enable_xattr; /* Enable xattribs */
- int max_objects; /*
+ u32 max_objects; /*
* Set to limit the number of objects created.
* 0 = no limit.
*/
int ll_init;
/* Runtime parameters. Set up by YAFFS. */
- int data_bytes_per_chunk;
+ u32 data_bytes_per_chunk;
/* Non-wide tnode stuff */
- u16 chunk_grp_bits; /* Number of bits that need to be resolved if
+ u32 chunk_grp_bits; /* Number of bits that need to be resolved if
* the tnodes are not wide enough.
*/
- u16 chunk_grp_size; /* == 2^^chunk_grp_bits */
+ u32 chunk_grp_size; /* == 2^^chunk_grp_bits */
/* Stuff to support wide tnodes */
u32 tnode_width;
int is_checkpointed;
/* Stuff to support block offsetting to support start block zero */
- int internal_start_block;
- int internal_end_block;
+ u32 internal_start_block;
+ u32 internal_end_block;
int block_offset;
int chunk_offset;
/* Runtime checkpointing stuff */
- int checkpt_page_seq; /* running sequence number of checkpt pages */
- int checkpt_byte_count;
- int checkpt_byte_offs;
+ u32 checkpt_page_seq; /* running sequence number of checkpt pages */
+ u32 checkpt_byte_count;
+ u32 checkpt_byte_offs;
u8 *checkpt_buffer;
int checkpt_open_write;
int blocks_in_checkpt;
- int checkpt_cur_chunk;
- int checkpt_cur_block;
- int checkpt_next_block;
- int *checkpt_block_list;
+ u32 checkpt_cur_chunk;
+ u32 checkpt_cur_block;
+ u32 checkpt_next_block;
+ u32 *checkpt_block_list;
int checkpt_max_blocks;
u32 checkpt_sum;
u32 checkpt_xor;
u8 *chunk_bits; /* bitmap of chunks in use */
u8 block_info_alt:1; /* allocated using alternative alloc */
u8 chunk_bits_alt:1; /* allocated using alternative alloc */
- int chunk_bit_stride; /* Number of bytes of chunk_bits per block.
+ u32 chunk_bit_stride; /* Number of bytes of chunk_bits per block.
* Must be consistent with chunks_per_block.
*/
int n_erased_blocks;
int alloc_block; /* Current block being allocated off */
- u32 alloc_page;
- int alloc_block_finder; /* Used to search for next allocation block */
+ int alloc_page;
+ u32 alloc_block_finder; /* Used to search for next allocation block */
/* Object and Tnode memory management */
void *allocator;
int doing_buffered_block_rewrite;
struct yaffs_cache *cache;
- int cache_last_use;
+ u32 cache_last_use;
/* Stuff for background deletion and unlinked files. */
struct yaffs_obj *unlinked_dir; /* Directory where unlinked and deleted
};
struct yaffs_shadow_fixer {
- int obj_id;
+ u32 obj_id;
int shadowed_id;
struct yaffs_shadow_fixer *next;
};
/* File operations */
int yaffs_file_rd(struct yaffs_obj *obj, u8 * buffer, loff_t offset,
- int n_bytes);
+ u32 n_bytes);
int yaffs_wr_file(struct yaffs_obj *obj, const u8 * buffer, loff_t offset,
- int n_bytes, int write_trhrough);
+ u32 n_bytes, int write_trhrough);
int yaffs_resize_file(struct yaffs_obj *obj, loff_t new_size);
struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent,
void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj);
YCHAR *yaffs_clone_str(const YCHAR *str);
void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list);
-void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no);
+void yaffs_block_became_dirty(struct yaffs_dev *dev, u32 block_no);
int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name,
int force, int is_shrink, int shadows,
struct yaffs_xattr_mod *xop);
-void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id,
+void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, u32 obj_id,
int backward_scanning);
int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks);
struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev);
struct yaffs_tnode *passed_tn);
int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset,
- int n_bytes, int write_trhrough);
+ u32 n_bytes, int write_trhrough);
void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size);
void yaffs_skip_rest_of_block(struct yaffs_dev *dev);
int yaffs_guts_format_dev(struct yaffs_dev *dev);
void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr,
- int *chunk_out, u32 *offset_out);
+ u32 *chunk_out, u32 *offset_out);
/*
 * Marshalling functions to get loff_t file sizes into and out of
* object headers.
int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk,
struct yaffs_ext_tags *tags);
+/* Global to control max write attempts */
+extern int yaffs_wr_attempts;
+
+
#endif
return pos;
}
pos += size;
- if (pos < xb_size - sizeof(int))
+ if (pos < xb_size - (int)sizeof(int))
memcpy(&size, xb + pos, sizeof(int));
else
size = 0;
memcpy(&size, xb + pos, sizeof(int));
while (size > 0 && (size < xb_size) && (pos + size < xb_size)) {
pos += size;
- if (pos < xb_size - sizeof(int))
+ if (pos < xb_size - (int)sizeof(int))
memcpy(&size, xb + pos, sizeof(int));
else
size = 0;
int filled = 0;
memcpy(&size, xb + pos, sizeof(int));
- while (size > sizeof(int) &&
+ while (size > (int)sizeof(int) &&
size <= xb_size &&
(pos + size) < xb_size &&
!filled) {
filled = 1;
}
pos += size;
- if (pos < xb_size - sizeof(int))
+ if (pos < xb_size - (int)sizeof(int))
memcpy(&size, xb + pos, sizeof(int));
else
size = 0;
u8 *buffer;
u8 *sum_buffer = (u8 *)st;
int n_bytes;
- int chunk_id;
+ u32 chunk_id;
int chunk_in_nand;
int chunk_in_block;
int result;
void yaffs_summary_gc(struct yaffs_dev *dev, int blk)
{
struct yaffs_block_info *bi = yaffs_get_block_info(dev, blk);
- int i;
+ u32 i;
if (!bi->has_summary)
return;
dev->n_ecc_fixed++;
}
- if (ecc_result < YAFFS_ECC_RESULT_UNFIXED)
+ if (retval == YAFFS_OK && ecc_result < YAFFS_ECC_RESULT_UNFIXED)
return YAFFS_OK;
else
return YAFFS_FAIL;
#define __YTRACE_H__
extern unsigned int yaffs_trace_mask;
-extern unsigned int yaffs_wr_attempts;
/*
* Tracing flags.
actually_used = bi->pages_in_use - bi->soft_del_pages;
if (bi->pages_in_use < 0 ||
- bi->pages_in_use > dev->param.chunks_per_block ||
+ bi->pages_in_use > (int)dev->param.chunks_per_block ||
bi->soft_del_pages < 0 ||
- bi->soft_del_pages > dev->param.chunks_per_block ||
- actually_used < 0 || actually_used > dev->param.chunks_per_block)
+ bi->soft_del_pages > (int)dev->param.chunks_per_block ||
+ actually_used < 0 || actually_used > (int)dev->param.chunks_per_block)
yaffs_trace(YAFFS_TRACE_VERIFY,
"Block %d has illegal values pages_in_used %d soft_del_pages %d",
n, bi->pages_in_use, bi->soft_del_pages);
void yaffs_verify_blocks(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
int state_count[YAFFS_NUMBER_OF_BLOCK_STATES];
int illegal_states = 0;
{
u32 x;
int required_depth;
- int actual_depth;
- int last_chunk;
+ u32 last_chunk;
u32 offset_in_chunk;
u32 the_chunk;
required_depth++;
}
- actual_depth = obj->variant.file_variant.top_level;
-
/* Check that the chunks in the tnode tree are all correct.
* We do this by scanning through the tnode tree and
* checking the tags for every chunk match.
int yaffs1_scan(struct yaffs_dev *dev)
{
struct yaffs_ext_tags tags;
- int blk;
- int result;
+ u32 blk;
int chunk;
- int c;
+ u32 c;
int deleted;
enum yaffs_block_state state;
LIST_HEAD(hard_list);
/* Read the tags and decide what to do */
chunk = blk * dev->param.chunks_per_block + c;
- result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
+ yaffs_rd_chunk_tags_nand(dev, chunk, NULL,
&tags);
/* Let's have a good look at this chunk... */
yaffs_set_chunk_bit(dev, blk, c);
bi->pages_in_use++;
- result = yaffs_rd_chunk_tags_nand(dev, chunk,
+ yaffs_rd_chunk_tags_nand(dev, chunk,
chunk_data,
NULL);
*/
void yaffs_calc_oldest_dirty_seq(struct yaffs_dev *dev)
{
- int i;
+ u32 i;
unsigned seq;
unsigned block_no = 0;
struct yaffs_block_info *b;
for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) {
if (b->block_state == YAFFS_BLOCK_STATE_FULL &&
(b->pages_in_use - b->soft_del_pages) <
- dev->param.chunks_per_block &&
+ (int)dev->param.chunks_per_block &&
b->seq_number < seq) {
seq = b->seq_number;
block_no = i;
static int yaffs2_wr_checkpt_dev(struct yaffs_dev *dev)
{
struct yaffs_checkpt_dev cp;
- u32 n_bytes;
+ int n_bytes;
u32 n_blocks = dev->internal_end_block - dev->internal_start_block + 1;
int ok;
static int yaffs2_rd_checkpt_dev(struct yaffs_dev *dev)
{
struct yaffs_checkpt_dev cp;
- u32 n_bytes;
+ int n_bytes;
u32 n_blocks =
(dev->internal_end_block - dev->internal_start_block + 1);
int ok;
sizeof(base_offset));
if (ok)
ok = (yaffs2_checkpt_wr(dev, tn, dev->tnode_size) ==
- dev->tnode_size);
+ (int)dev->tnode_size);
return ok;
}
tn = yaffs_get_tnode(dev);
if (tn)
ok = (yaffs2_checkpt_rd(dev, tn, dev->tnode_size) ==
- dev->tnode_size);
+ (int)dev->tnode_size);
else
ok = 0;
cp.obj_id, cp.parent_id, cp.variant_type,
cp.hdr_chunk);
- if (ok && cp.obj_id == ~0) {
+ if (ok && cp.obj_id == (u32)(~0)) {
done = 1;
} else if (ok) {
obj =
while (increase > 0 && small_increase_ok) {
this_write = increase;
- if (this_write > dev->data_bytes_per_chunk)
+ if (this_write > (int)dev->data_bytes_per_chunk)
this_write = dev->data_bytes_per_chunk;
written =
yaffs_do_file_wr(obj, local_buffer, pos, this_write,
int is_shrink;
int is_unlinked;
struct yaffs_ext_tags tags;
- int result;
int alloc_failed = 0;
int chunk = blk * dev->param.chunks_per_block + chunk_in_block;
struct yaffs_file_var *file_var;
struct yaffs_symlink_var *sl_var;
if (summary_available) {
- result = yaffs_summary_fetch(dev, &tags, chunk_in_block);
+ yaffs_summary_fetch(dev, &tags, chunk_in_block);
tags.seq_number = bi->seq_number;
}
if (!summary_available || tags.obj_id == 0) {
- result = yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
+ yaffs_rd_chunk_tags_nand(dev, chunk, NULL, &tags);
dev->tags_used++;
} else {
dev->summary_used++;
* invalid data until needed.
*/
- result = yaffs_rd_chunk_tags_nand(dev,
+ yaffs_rd_chunk_tags_nand(dev,
chunk,
chunk_data,
NULL);
int yaffs2_scan_backwards(struct yaffs_dev *dev)
{
- int blk;
+ u32 blk;
int block_iter;
int start_iter;
int end_iter;
int n_to_scan = 0;
enum yaffs_block_state state;
int c;
- int deleted;
LIST_HEAD(hard_list);
struct yaffs_block_info *bi;
u32 seq_number;
/* get the block to scan in the correct order */
blk = block_index[block_iter].block;
bi = yaffs_get_block_info(dev, blk);
- deleted = 0;
summary_available = yaffs_summary_read(dev, dev->sum_tags, blk);