*/
const char *yaffs_guts_c_version =
- "$Id: yaffs_guts.c,v 1.36 2006-09-05 23:23:34 charles Exp $";
+ "$Id: yaffs_guts.c,v 1.45 2006-11-14 03:07:17 charles Exp $";
#include "yportenv.h"
#include "yaffs_checkptrw.h"
#include "yaffs_nand.h"
+#include "yaffs_packedtags2.h"
#ifdef CONFIG_YAFFS_WINCE
/* Robustification (if it ever comes about...) */
static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND);
-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND);
+static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk);
static void yaffs_HandleWriteChunkOk(yaffs_Device * dev, int chunkInNAND,
const __u8 * data,
const yaffs_ExtendedTags * tags);
loff_t yaffs_GetFileSize(yaffs_Object * obj);
-static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve);
+static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr);
static void yaffs_VerifyFreeChunks(yaffs_Device * dev);
static void yaffs_InvalidateCheckpoint(yaffs_Device *dev);
+
+
+/* Function to calculate chunk and offset.
+ *
+ * Translates a byte address into a (chunk number, offset within chunk) pair.
+ * If the chunk size is a power of two (dev->chunkShift non-zero) this is a
+ * simple shift and mask.  Otherwise the address is decomposed using "crumbs"
+ * -- power-of-two sub-units of a chunk -- so that only a 32-bit division by
+ * crumbsPerChunk is needed rather than a 64-bit division of addr.
+ * One of chunkShift or crumbsPerChunk must be configured, else YBUG() fires.
+ */
+
+static void yaffs_AddrToChunk(yaffs_Device *dev, loff_t addr, __u32 *chunk, __u32 *offset)
+{
+ if(dev->chunkShift){
+ /* Easy-peasy power of 2 case */
+ *chunk = (__u32)(addr >> dev->chunkShift);
+ *offset = (__u32)(addr & dev->chunkMask);
+ }
+ else if(dev->crumbsPerChunk)
+ {
+ /* Case where we're using "crumbs" */
+ *offset = (__u32)(addr & dev->crumbMask);
+ addr >>= dev->crumbShift;
+ *chunk = ((__u32)addr)/dev->crumbsPerChunk;
+ /* Add the leftover whole crumbs back into the byte offset */
+ *offset += ((addr - (*chunk * dev->crumbsPerChunk)) << dev->crumbShift);
+ }
+ else
+ YBUG();
+}
+
+/* Function to return the number of shifts for a power of 2 greater than or equal
+ * to the given number, i.e. ceil(log2(x)).
+ * extraBits records whether any low-order set bit was shifted out; if so, x was
+ * not an exact power of 2 and one more shift is needed to reach the next one.
+ * Note we don't try to cater for all possible numbers and this does not have to
+ * be hellishly efficient.
+ */
+
+static __u32 ShiftsGE(__u32 x)
+{
+ int extraBits;
+ int nShifts;
+
+ nShifts = extraBits = 0;
+
+ while(x>1){
+ if(x & 1) extraBits++;
+ x>>=1;
+ nShifts++;
+ }
+
+ if(extraBits)
+ nShifts++;
+
+ return nShifts;
+}
+
+/* Function to return the number of shifts to get a 1 in bit 0,
+ * i.e. the number of trailing zero bits.  For an exact power of 2 this is
+ * log2(x), so the result can be used as a shift count for division by x.
+ * Returns 0 for x == 0 (callers must not treat that as a valid divisor).
+ */
+
+static __u32 ShiftDiv(__u32 x)
+{
+ int nShifts;
+
+ nShifts = 0;
+
+ if(!x) return 0;
+
+ while( !(x&1)){
+ x>>=1;
+ nShifts++;
+ }
+
+ return nShifts;
+}
+
+
+
/*
* Temporary buffer manipulations.
*/
*/
dev->unmanagedTempAllocations++;
- return YMALLOC(dev->nBytesPerChunk);
+ return YMALLOC(dev->nDataBytesPerChunk);
}
int retval = YAFFS_OK;
__u8 *data = yaffs_GetTempBuffer(dev, __LINE__);
yaffs_ExtendedTags tags;
+ int result;
- yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunkInNAND, data, &tags);
+
+ if(tags.eccResult > YAFFS_ECC_RESULT_NO_ERROR)
+ retval = YAFFS_FAIL;
+
- if (!yaffs_CheckFF(data, dev->nBytesPerChunk) || tags.chunkUsed) {
+ if (!yaffs_CheckFF(data, dev->nDataBytesPerChunk) || tags.chunkUsed) {
T(YAFFS_TRACE_NANDACCESS,
(TSTR("Chunk %d not erased" TENDSTR), chunkInNAND));
retval = YAFFS_FAIL;
{
int chunk;
- int writeOk = 1;
+ int writeOk = 0;
+ int erasedOk = 1;
int attempts = 0;
+ yaffs_BlockInfo *bi;
yaffs_InvalidateCheckpoint(dev);
do {
- chunk = yaffs_AllocateChunk(dev, useReserve);
+ chunk = yaffs_AllocateChunk(dev, useReserve,&bi);
if (chunk >= 0) {
+ /* First check this chunk is erased, if it needs checking.
+ * The checking policy (unless forced always on) is as follows:
+ * Check the first page we try to write in a block.
+ * - If the check passes then we don't need to check any more.
+ * - If the check fails, we check again...
+ * If the block has been erased, we don't need to check.
+ *
+ * However, if the block has been prioritised for gc, then
+ * we think there might be something odd about this block
+ * and stop using it.
+ *
+ * Rationale:
+ * We should only ever see chunks that have not been erased
+ * if there was a partially written chunk due to power loss
+ * This checking policy should catch that case with very
+ * few checks and thus save a lot of checks that are most likely not
+ * needed.
+ */
+
+ if(bi->gcPrioritise){
+ yaffs_DeleteChunk(dev, chunk, 1, __LINE__);
+ } else {
+#ifdef CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED
+
+ bi->skipErasedCheck = 0;
- /* First check this chunk is erased... */
-#ifndef CONFIG_YAFFS_DISABLE_CHUNK_ERASED_CHECK
- writeOk = yaffs_CheckChunkErased(dev, chunk);
#endif
- if (!writeOk) {
- T(YAFFS_TRACE_ERROR,
- (TSTR
- ("**>> yaffs chunk %d was not erased"
- TENDSTR), chunk));
- } else {
- writeOk =
- yaffs_WriteChunkWithTagsToNAND(dev, chunk,
- data, tags);
- }
- attempts++;
+ if(!bi->skipErasedCheck){
+ erasedOk = yaffs_CheckChunkErased(dev, chunk);
+ if(erasedOk && !bi->gcPrioritise)
+ bi->skipErasedCheck = 1;
+ }
- if (writeOk) {
- /*
- * Copy the data into the robustification buffer.
- * NB We do this at the end to prevent duplicates in the case of a write error.
- * Todo
- */
- yaffs_HandleWriteChunkOk(dev, chunk, data,
- tags);
- } else {
- yaffs_HandleWriteChunkError(dev, chunk);
+ if (!erasedOk) {
+ T(YAFFS_TRACE_ERROR,
+ (TSTR
+ ("**>> yaffs chunk %d was not erased"
+ TENDSTR), chunk));
+ } else {
+ writeOk =
+ yaffs_WriteChunkWithTagsToNAND(dev, chunk,
+ data, tags);
+ }
+
+ attempts++;
+
+ if (writeOk) {
+ /*
+ * Copy the data into the robustification buffer.
+ * NB We do this at the end to prevent duplicates in the case of a write error.
+ * Todo
+ */
+ yaffs_HandleWriteChunkOk(dev, chunk, data, tags);
+
+ } else {
+ /* The erased check or write failed */
+ yaffs_HandleWriteChunkError(dev, chunk, erasedOk);
+ }
}
}
static void yaffs_RetireBlock(yaffs_Device * dev, int blockInNAND)
{
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
yaffs_InvalidateCheckpoint(dev);
yaffs_MarkBlockBad(dev, blockInNAND);
+ /* Mark the block dead and clear the gc/retirement flags so it can never
+  * be selected again; it is now permanently out of service. */
- yaffs_GetBlockInfo(dev, blockInNAND)->blockState =
- YAFFS_BLOCK_STATE_DEAD;
+ bi->blockState = YAFFS_BLOCK_STATE_DEAD;
+ bi->gcPrioritise = 0;
+ bi->needsRetiring = 0;
dev->nRetiredBlocks++;
}
{
}
-static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND)
+/* Record a chunk error against a block.
+ * On the first error the block is flagged for prioritised garbage collection
+ * and the device-level pending-prioritised-GC flag is raised.  After more
+ * than 3 accumulated strikes the block is also marked for retirement.
+ * NOTE(review): the strike counter only advances while gcPrioritise is
+ * clear, so strikes accumulate one per gc cycle -- confirm intended.
+ */
+void yaffs_HandleChunkError(yaffs_Device *dev, yaffs_BlockInfo *bi)
+{
+ if(!bi->gcPrioritise){
+ bi->gcPrioritise = 1;
+ dev->hasPendingPrioritisedGCs = 1;
+ bi->chunkErrorStrikes ++;
+
+ if(bi->chunkErrorStrikes > 3){
+ bi->needsRetiring = 1; /* Too many strikes, so retire this */
+ T(YAFFS_TRACE_ALWAYS, (TSTR("yaffs: Block struck out" TENDSTR)));
+
+ }
+
+ }
+}
+
+/* Debug/trace helper: when YAFFS_TRACE_BAD_BLOCKS tracing is enabled, log
+ * every block flagged as needing retirement or prioritised for gc.
+ * The trace-mask test sits in the loop condition, so this is effectively a
+ * no-op when that trace bit is off. */
+static void yaffs_ReportOddballBlocks(yaffs_Device *dev)
+{
+ int i;
+
+ for(i = dev->internalStartBlock; i <= dev->internalEndBlock && (yaffs_traceMask & YAFFS_TRACE_BAD_BLOCKS); i++){
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev,i);
+ if(bi->needsRetiring || bi->gcPrioritise)
+ T(YAFFS_TRACE_BAD_BLOCKS,(TSTR("yaffs block %d%s%s" TENDSTR),
+ i,
+ bi->needsRetiring ? " needs retiring" : "",
+ bi->gcPrioritise ? " gc prioritised" : ""));
+
+ }
+}
+
+/* Handle a failed chunk write.
+ * Always records a chunk error against the block (prioritising it for gc).
+ * If the pre-write erased check passed (erasedOk) then the NAND write itself
+ * failed, so the block is additionally marked for retirement.  Finally the
+ * chunk is deleted so its data is not picked up on scan.
+ */
+static void yaffs_HandleWriteChunkError(yaffs_Device * dev, int chunkInNAND, int erasedOk)
{
+
int blockInNAND = chunkInNAND / dev->nChunksPerBlock;
+ yaffs_BlockInfo *bi = yaffs_GetBlockInfo(dev, blockInNAND);
+
+ yaffs_HandleChunkError(dev,bi);
+
+
+ if(erasedOk ) {
+ /* Was an actual write failure, so mark the block for retirement */
+ bi->needsRetiring = 1;
+ T(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
+ (TSTR("**>> Block %d needs retiring" TENDSTR), blockInNAND));
- /* Mark the block for retirement */
- yaffs_GetBlockInfo(dev, blockInNAND)->needsRetiring = 1;
+
+ }
+
/* Delete the chunk */
yaffs_DeleteChunk(dev, chunkInNAND, 1, __LINE__);
}
dev->blockInfoAlt = 0;
/* Set up dynamic blockinfo stuff. */
- dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; // round up bytes
+ dev->chunkBitmapStride = (dev->nChunksPerBlock + 7) / 8; /* round up bytes */
dev->chunkBits = YMALLOC(dev->chunkBitmapStride * nBlocks);
if(!dev->chunkBits){
dev->chunkBits = YMALLOC_ALT(dev->chunkBitmapStride * nBlocks);
*/
return (bi->sequenceNumber <= dev->oldestDirtySequence);
- return 1;
-
}
/* FindDiretiestBlock is used to select the dirtiest block (or close enough)
int iterations;
int dirtiest = -1;
int pagesInUse;
+ int prioritised=0;
yaffs_BlockInfo *bi;
static int nonAggressiveSkip = 0;
+ int pendingPrioritisedExist = 0;
+
+ /* First let's see if we need to grab a prioritised block */
+ if(dev->hasPendingPrioritisedGCs){
+ for(i = dev->internalStartBlock; i < dev->internalEndBlock && !prioritised; i++){
+
+ bi = yaffs_GetBlockInfo(dev, i);
+ if(bi->gcPrioritise) {
+ pendingPrioritisedExist = 1;
+ if(bi->blockState == YAFFS_BLOCK_STATE_FULL &&
+ yaffs_BlockNotDisqualifiedFromGC(dev, bi)){
+ pagesInUse = (bi->pagesInUse - bi->softDeletions);
+ dirtiest = i;
+ prioritised = 1;
+ aggressive = 1; /* Fool the non-aggressive skip logic below */
+ }
+ }
+ }
+
+ if(!pendingPrioritisedExist) /* None found, so we can clear this */
+ dev->hasPendingPrioritisedGCs = 0;
+ }
/* If we're doing aggressive GC then we are happy to take a less-dirty block, and
* search harder.
return -1;
}
- pagesInUse =
- (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
+ if(!prioritised)
+ pagesInUse =
+ (aggressive) ? dev->nChunksPerBlock : YAFFS_PASSIVE_GC_CHUNKS + 1;
if (aggressive) {
iterations =
}
}
- for (i = 0; i <= iterations && pagesInUse > 0; i++) {
+ for (i = 0; i <= iterations && pagesInUse > 0 && !prioritised; i++) {
b++;
if (b < dev->internalStartBlock || b > dev->internalEndBlock) {
b = dev->internalStartBlock;
#endif
if (bi->blockState == YAFFS_BLOCK_STATE_FULL &&
- (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
+ (bi->pagesInUse - bi->softDeletions) < pagesInUse &&
yaffs_BlockNotDisqualifiedFromGC(dev, bi)) {
dirtiest = b;
pagesInUse = (bi->pagesInUse - bi->softDeletions);
if (dirtiest > 0) {
T(YAFFS_TRACE_GC,
- (TSTR("GC Selected block %d with %d free" TENDSTR), dirtiest,
- dev->nChunksPerBlock - pagesInUse));
+ (TSTR("GC Selected block %d with %d free, prioritised:%d" TENDSTR), dirtiest,
+ dev->nChunksPerBlock - pagesInUse,prioritised));
}
dev->oldestDirtySequence = 0;
/* If the block is still healthy erase it and mark as clean.
* If the block has had a data failure, then retire it.
*/
+
+ T(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE,
+ (TSTR("yaffs_BlockBecameDirty block %d state %d %s"TENDSTR),
+ blockNo, bi->blockState, (bi->needsRetiring) ? "needs retiring" : ""));
+
bi->blockState = YAFFS_BLOCK_STATE_DIRTY;
if (!bi->needsRetiring) {
bi->pagesInUse = 0;
bi->softDeletions = 0;
bi->hasShrinkHeader = 0;
+ bi->skipErasedCheck = 1; /* This is clean, so no need to check */
+ bi->gcPrioritise = 0;
yaffs_ClearChunkBits(dev, blockNo);
T(YAFFS_TRACE_ERASE,
return (dev->nFreeChunks > reservedChunks);
}
-static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve)
+static int yaffs_AllocateChunk(yaffs_Device * dev, int useReserve, yaffs_BlockInfo **blockUsedPtr)
{
int retVal;
yaffs_BlockInfo *bi;
dev->allocationBlock = -1;
}
+ if(blockUsedPtr)
+ *blockUsedPtr = bi;
+
return retVal;
}
objId = in->objectId;
fSize = in->variant.fileVariant.fileSize;
nChunks =
- (fSize + in->myDev->nBytesPerChunk - 1) / in->myDev->nBytesPerChunk;
+ (fSize + in->myDev->nDataBytesPerChunk - 1) / in->myDev->nDataBytesPerChunk;
for (chunk = 1; chunk <= nChunks; chunk++) {
tn = yaffs_FindLevel0Tnode(in->myDev, &in->variant.fileVariant,
if (chunkInNAND >= 0) {
return yaffs_ReadChunkWithTagsFromNAND(in->myDev, chunkInNAND,
- buffer, NULL);
+ buffer,NULL);
} else {
T(YAFFS_TRACE_NANDACCESS,
(TSTR("Chunk %d not found zero instead" TENDSTR),
chunkInNAND));
/* get sane (zero) data if you read a hole */
- memset(buffer, 0, in->myDev->nBytesPerChunk);
+ memset(buffer, 0, in->myDev->nDataBytesPerChunk);
return 0;
}
int prevChunkId;
int retVal = 0;
+ int result = 0;
int newChunkId;
yaffs_ExtendedTags newTags;
prevChunkId = in->chunkId;
if (prevChunkId >= 0) {
- yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, prevChunkId,
buffer, NULL);
memcpy(oldName, oh->name, sizeof(oh->name));
}
- memset(buffer, 0xFF, dev->nBytesPerChunk);
+ memset(buffer, 0xFF, dev->nDataBytesPerChunk);
oh->type = in->variantType;
oh->yst_mode = in->yst_mode;
yaffs_ChunkCache *cache;
int nCaches = obj->myDev->nShortOpCaches;
- if(nCaches > 0){
- for(i = 0; i < nCaches; i++){
- if (dev->srCache[i].object == obj &&
- dev->srCache[i].dirty)
- return 1;
- }
+ for(i = 0; i < nCaches; i++){
+ cache = &dev->srCache[i];
+ if (cache->object == obj &&
+ cache->dirty)
+ return 1;
}
return 0;
yaffs_CheckpointValidity cp;
cp.structType = sizeof(cp);
cp.magic = YAFFS_MAGIC;
- cp.version = 1;
+ cp.version = YAFFS_CHECKPOINT_VERSION;
cp.head = (head) ? 1 : 0;
return (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp))?
if(ok)
ok = (cp.structType == sizeof(cp)) &&
(cp.magic == YAFFS_MAGIC) &&
- (cp.version == 1) &&
+ (cp.version == YAFFS_CHECKPOINT_VERSION) &&
(cp.head == ((head) ? 1 : 0));
return ok ? 1 : 0;
}
if (!obj->deferedFree) {
yaffs_ObjectToCheckpointObject(&cp,obj);
cp.structType = sizeof(cp);
- /* printf("Write out object %d type %d\n",obj->objectId,obj->variantType); */
+
+ T(YAFFS_TRACE_CHECKPOINT,(
+ TSTR("Checkpoint write object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+ cp.objectId,cp.parentId,cp.variantType,cp.chunkId,(unsigned) obj));
+
ok = (yaffs_CheckpointWrite(dev,&cp,sizeof(cp)) == sizeof(cp));
if(ok && obj->variantType == YAFFS_OBJECT_TYPE_FILE){
if(ok && cp.objectId == ~0)
done = 1;
else if(ok){
- T(YAFFS_TRACE_CHECKPOINT,(TSTR("Read object %d parent %d type %d" TENDSTR),
- cp.objectId,cp.parentId,cp.variantType));
obj = yaffs_FindOrCreateObjectByNumber(dev,cp.objectId, cp.variantType);
+ T(YAFFS_TRACE_CHECKPOINT,(TSTR("Checkpoint read object %d parent %d type %d chunk %d obj addr %x" TENDSTR),
+ cp.objectId,cp.parentId,cp.variantType,cp.chunkId,(unsigned) obj));
if(obj) {
yaffs_CheckpointObjectToObject(obj,&cp);
if(obj->variantType == YAFFS_OBJECT_TYPE_FILE) {
int yaffs_CheckpointSave(yaffs_Device *dev)
{
+ yaffs_ReportOddballBlocks(dev);
T(YAFFS_TRACE_CHECKPOINT,(TSTR("save entry: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
if(!dev->isCheckpointed)
T(YAFFS_TRACE_CHECKPOINT,(TSTR("restore exit: isCheckpointed %d"TENDSTR),dev->isCheckpointed));
+ yaffs_ReportOddballBlocks(dev);
+
return retval;
}
* Curve-balls: the first chunk might also be the last chunk.
*/
-int yaffs_ReadDataFromFile(yaffs_Object * in, __u8 * buffer, __u32 offset,
+int yaffs_ReadDataFromFile(yaffs_Object * in, __u8 * buffer, loff_t offset,
int nBytes)
{
dev = in->myDev;
while (n > 0) {
- chunk = offset / dev->nBytesPerChunk + 1; /* The first chunk is 1 */
- start = offset % dev->nBytesPerChunk;
+ //chunk = offset / dev->nDataBytesPerChunk + 1;
+ //start = offset % dev->nDataBytesPerChunk;
+ yaffs_AddrToChunk(dev,offset,&chunk,&start);
+ chunk++;
/* OK now check for the curveball where the start and end are in
* the same chunk.
*/
- if ((start + n) < dev->nBytesPerChunk) {
+ if ((start + n) < dev->nDataBytesPerChunk) {
nToCopy = n;
} else {
- nToCopy = dev->nBytesPerChunk - start;
+ nToCopy = dev->nDataBytesPerChunk - start;
}
cache = yaffs_FindChunkCache(in, chunk);
* then use the cache (if there is caching)
* else bypass the cache.
*/
- if (cache || nToCopy != dev->nBytesPerChunk) {
+ if (cache || nToCopy != dev->nDataBytesPerChunk) {
if (dev->nShortOpCaches > 0) {
/* If we can't find the data in the cache, then load it up. */
#ifdef CONFIG_YAFFS_WINCE
yfsd_UnlockYAFFS(TRUE);
#endif
- memcpy(buffer, localBuffer, dev->nBytesPerChunk);
+ memcpy(buffer, localBuffer, dev->nDataBytesPerChunk);
#ifdef CONFIG_YAFFS_WINCE
yfsd_LockYAFFS(TRUE);
return nDone;
}
-int yaffs_WriteDataToFile(yaffs_Object * in, const __u8 * buffer, __u32 offset,
+int yaffs_WriteDataToFile(yaffs_Object * in, const __u8 * buffer, loff_t offset,
int nBytes, int writeThrough)
{
dev = in->myDev;
while (n > 0 && chunkWritten >= 0) {
- chunk = offset / dev->nBytesPerChunk + 1;
- start = offset % dev->nBytesPerChunk;
+ //chunk = offset / dev->nDataBytesPerChunk + 1;
+ //start = offset % dev->nDataBytesPerChunk;
+ yaffs_AddrToChunk(dev,offset,&chunk,&start);
+ chunk++;
/* OK now check for the curveball where the start and end are in
* the same chunk.
*/
- if ((start + n) < dev->nBytesPerChunk) {
+ if ((start + n) < dev->nDataBytesPerChunk) {
nToCopy = n;
/* Now folks, to calculate how many bytes to write back....
nBytesRead =
in->variant.fileVariant.fileSize -
- ((chunk - 1) * dev->nBytesPerChunk);
+ ((chunk - 1) * dev->nDataBytesPerChunk);
- if (nBytesRead > dev->nBytesPerChunk) {
- nBytesRead = dev->nBytesPerChunk;
+ if (nBytesRead > dev->nDataBytesPerChunk) {
+ nBytesRead = dev->nDataBytesPerChunk;
}
nToWriteBack =
(start + n)) ? nBytesRead : (start + n);
} else {
- nToCopy = dev->nBytesPerChunk - start;
- nToWriteBack = dev->nBytesPerChunk;
+ nToCopy = dev->nDataBytesPerChunk - start;
+ nToWriteBack = dev->nDataBytesPerChunk;
}
- if (nToCopy != dev->nBytesPerChunk) {
+ if (nToCopy != dev->nDataBytesPerChunk) {
/* An incomplete start or end chunk (or maybe both start and end chunk) */
if (dev->nShortOpCaches > 0) {
yaffs_ChunkCache *cache;
#ifdef CONFIG_YAFFS_WINCE
yfsd_UnlockYAFFS(TRUE);
#endif
- memcpy(localBuffer, buffer, dev->nBytesPerChunk);
+ memcpy(localBuffer, buffer, dev->nDataBytesPerChunk);
#ifdef CONFIG_YAFFS_WINCE
yfsd_LockYAFFS(TRUE);
#endif
chunkWritten =
yaffs_WriteChunkDataToObject(in, chunk, localBuffer,
- dev->nBytesPerChunk,
+ dev->nDataBytesPerChunk,
0);
yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
#else
/* A full chunk. Write directly from the supplied buffer. */
chunkWritten =
yaffs_WriteChunkDataToObject(in, chunk, buffer,
- dev->nBytesPerChunk,
+ dev->nDataBytesPerChunk,
0);
#endif
/* Since we've overwritten the cached data, we better invalidate it. */
yaffs_Device *dev = in->myDev;
int oldFileSize = in->variant.fileVariant.fileSize;
- int lastDel = 1 + (oldFileSize - 1) / dev->nBytesPerChunk;
+ int lastDel = 1 + (oldFileSize - 1) / dev->nDataBytesPerChunk;
- int startDel = 1 + (newSize + dev->nBytesPerChunk - 1) /
- dev->nBytesPerChunk;
+ int startDel = 1 + (newSize + dev->nDataBytesPerChunk - 1) /
+ dev->nDataBytesPerChunk;
int i;
int chunkId;
}
-int yaffs_ResizeFile(yaffs_Object * in, int newSize)
+int yaffs_ResizeFile(yaffs_Object * in, loff_t newSize)
{
int oldFileSize = in->variant.fileVariant.fileSize;
- int sizeOfPartialChunk;
+ int newSizeOfPartialChunk;
+ int newFullChunks;
+
yaffs_Device *dev = in->myDev;
-
- sizeOfPartialChunk = newSize % dev->nBytesPerChunk;
+
+ yaffs_AddrToChunk(dev, newSize, &newFullChunks, &newSizeOfPartialChunk);
yaffs_FlushFilesChunkCache(in);
yaffs_InvalidateWholeChunkCache(in);
yaffs_PruneResizedChunks(in, newSize);
- if (sizeOfPartialChunk != 0) {
- int lastChunk = 1 + newSize / dev->nBytesPerChunk;
+ if (newSizeOfPartialChunk != 0) {
+ int lastChunk = 1 + newFullChunks;
+
__u8 *localBuffer = yaffs_GetTempBuffer(dev, __LINE__);
/* Got to read and rewrite the last chunk with its new size and zero pad */
yaffs_ReadChunkDataFromObject(in, lastChunk,
localBuffer);
- memset(localBuffer + sizeOfPartialChunk, 0,
- dev->nBytesPerChunk - sizeOfPartialChunk);
+ memset(localBuffer + newSizeOfPartialChunk, 0,
+ dev->nDataBytesPerChunk - newSizeOfPartialChunk);
yaffs_WriteChunkDataToObject(in, lastChunk, localBuffer,
- sizeOfPartialChunk, 1);
+ newSizeOfPartialChunk, 1);
yaffs_ReleaseTempBuffer(dev, localBuffer, __LINE__);
}
int startIterator;
int endIterator;
int nBlocksToScan = 0;
+ int result;
int chunk;
int c;
/* Read the tags and decide what to do */
chunk = blk * dev->nChunksPerBlock + c;
- yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
&tags);
/* Let's have a good look at this chunk... */
yaffs_PutChunkIntoFile(in, tags.chunkId, chunk,
1);
endpos =
- (tags.chunkId - 1) * dev->nBytesPerChunk +
+ (tags.chunkId - 1) * dev->nDataBytesPerChunk +
tags.byteCount;
if (in->variantType == YAFFS_OBJECT_TYPE_FILE
&& in->variant.fileVariant.scannedFileSize <
yaffs_SetChunkBit(dev, blk, c);
bi->pagesInUse++;
- yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk,
chunkData,
NULL);
__u8 *chunkData;
yaffs_ObjectHeader *oh;
yaffs_Device *dev = in->myDev;
+ yaffs_ExtendedTags tags;
+ int result;
+#if 0
+ T(YAFFS_TRACE_SCAN,(TSTR("details for object %d %s loaded" TENDSTR),
+ in->objectId,
+ in->lazyLoaded ? "not yet" : "already"));
+#endif
+
if(in->lazyLoaded){
in->lazyLoaded = 0;
chunkData = yaffs_GetTempBuffer(dev, __LINE__);
- yaffs_ReadChunkWithTagsFromNAND(dev,in->chunkId,chunkData,NULL);
+ result = yaffs_ReadChunkWithTagsFromNAND(dev,in->chunkId,chunkData,&tags);
oh = (yaffs_ObjectHeader *) chunkData;
in->yst_mode = oh->yst_mode;
int nBlocksToScan = 0;
int chunk;
+ int result;
int c;
int deleted;
yaffs_BlockState state;
int fileSize;
int isShrink;
+ int foundChunksInBlock;
int equivalentObjectId;
deleted = 0;
/* For each chunk in each block that needs scanning.... */
+ foundChunksInBlock = 0;
for (c = dev->nChunksPerBlock - 1; c >= 0 &&
(state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
state == YAFFS_BLOCK_STATE_ALLOCATING); c--) {
*/
chunk = blk * dev->nChunksPerBlock + c;
- yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
+ result = yaffs_ReadChunkWithTagsFromNAND(dev, chunk, NULL,
&tags);
/* Let's have a good look at this chunk... */
if (!tags.chunkUsed) {
- // An unassigned chunk in the block
- // This means that either the block is empty or
- // this is the one being allocated from
+ /* An unassigned chunk in the block.
+ * If there are used chunks after this one, then
+ * it is a chunk that was skipped due to failing the erased
+ * check. Just skip it so that it can be deleted.
+ * But, more typically, We get here when this is an unallocated
+ * chunk and this means that either the block is empty or
+ * this is the one being allocated from
+ */
- if (c == 0) {
+ if(foundChunksInBlock)
+ {
+ /* This is a chunk that was skipped due to failing the erased check */
+
+ } else if (c == 0) {
/* We're looking at the first chunk in the block so the block is unused */
state = YAFFS_BLOCK_STATE_EMPTY;
dev->nErasedBlocks++;
} else {
- /* this is the block being allocated from */
- if (state ==
- YAFFS_BLOCK_STATE_NEEDS_SCANNING) {
- T(YAFFS_TRACE_SCAN,
- (TSTR
- (" Allocating from %d %d"
- TENDSTR), blk, c));
+ if (state == YAFFS_BLOCK_STATE_NEEDS_SCANNING ||
+ state == YAFFS_BLOCK_STATE_ALLOCATING) {
+ if(dev->sequenceNumber == bi->sequenceNumber) {
+ /* this is the block being allocated from */
+
+ T(YAFFS_TRACE_SCAN,
+ (TSTR
+ (" Allocating from %d %d"
+ TENDSTR), blk, c));
+
+ state = YAFFS_BLOCK_STATE_ALLOCATING;
+ dev->allocationBlock = blk;
+ dev->allocationPage = c;
+ dev->allocationBlockFinder = blk;
+ }
+ else {
+ /* This is a partially written block that is not
+ * the current allocation block. This block must have
+ * had a write failure, so set up for retirement.
+ */
+
+ bi->needsRetiring = 1;
+ bi->gcPrioritise = 1;
+
+ T(YAFFS_TRACE_ALWAYS,
+ (TSTR("Partially written block %d being set for retirement" TENDSTR),
+ blk));
+ }
+
}
- state = YAFFS_BLOCK_STATE_ALLOCATING;
- dev->allocationBlock = blk;
- dev->allocationPage = c;
- dev->allocationBlockFinder = blk;
- /* Set it to here to encourage the allocator to
- * go forth from here.
- */
- /* Yaffs2 sanity check:
- * This should be the one with the highest sequence number
- */
- if (dev->isYaffs2
- && (dev->sequenceNumber !=
- bi->sequenceNumber)) {
- T(YAFFS_TRACE_ALWAYS,
- (TSTR
- ("yaffs: Allocation block %d was not highest sequence "
- "id: block seq = %d, dev seq = %d"
- TENDSTR), blk,
- bi->sequenceNumber,
- dev->sequenceNumber));
- }
}
dev->nFreeChunks++;
+
} else if (tags.chunkId > 0) {
/* chunkId > 0 so it is a data chunk... */
unsigned int endpos;
-
__u32 chunkBase =
- (tags.chunkId - 1) * dev->nBytesPerChunk;
+ (tags.chunkId - 1) * dev->nDataBytesPerChunk;
+
+ foundChunksInBlock = 1;
+
yaffs_SetChunkBit(dev, blk, c);
bi->pagesInUse++;
*/
endpos =
(tags.chunkId -
- 1) * dev->nBytesPerChunk +
+ 1) * dev->nDataBytesPerChunk +
tags.byteCount;
if (!in->valid && /* have not got an object header yet */
/* chunkId == 0, so it is an ObjectHeader.
* Thus, we read in the object header and make the object
*/
+ foundChunksInBlock = 1;
+
yaffs_SetChunkBit(dev, blk, c);
bi->pagesInUse++;
* living with invalid data until needed.
*/
- yaffs_ReadChunkWithTagsFromNAND(dev,
+ result = yaffs_ReadChunkWithTagsFromNAND(dev,
chunk,
chunkData,
NULL);
}
#endif
else {
+ int result;
__u8 *buffer = yaffs_GetTempBuffer(obj->myDev, __LINE__);
yaffs_ObjectHeader *oh = (yaffs_ObjectHeader *) buffer;
- memset(buffer, 0, obj->myDev->nBytesPerChunk);
+ memset(buffer, 0, obj->myDev->nDataBytesPerChunk);
if (obj->chunkId >= 0) {
- yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
+ result = yaffs_ReadChunkWithTagsFromNAND(obj->myDev,
obj->chunkId, buffer,
NULL);
}
return yaffs_strlen(obj->variant.symLinkVariant.alias);
} else {
/* Only a directory should drop through to here */
- return obj->myDev->nBytesPerChunk;
+ return obj->myDev->nDataBytesPerChunk;
}
}
{
unsigned x;
int bits;
- int extraBits;
T(YAFFS_TRACE_TRACING, (TSTR("yaffs: yaffs_GutsInitialise()" TENDSTR)));
/* Check geometry parameters. */
- if ((dev->isYaffs2 && dev->nBytesPerChunk < 1024) ||
- (!dev->isYaffs2 && dev->nBytesPerChunk != 512) ||
+ if ((dev->isYaffs2 && dev->nDataBytesPerChunk < 1024) ||
+ (!dev->isYaffs2 && dev->nDataBytesPerChunk != 512) ||
dev->nChunksPerBlock < 2 ||
dev->nReservedBlocks < 2 ||
dev->internalStartBlock <= 0 ||
T(YAFFS_TRACE_ALWAYS,
(TSTR
("yaffs: NAND geometry problems: chunk size %d, type is yaffs%s "
- TENDSTR), dev->nBytesPerChunk, dev->isYaffs2 ? "2" : ""));
+ TENDSTR), dev->nDataBytesPerChunk, dev->isYaffs2 ? "2" : ""));
return YAFFS_FAIL;
}
- /* OK now calculate a few things for the device
+ /* OK now calculate a few things for the device */
+
+ /*
+ * Calculate all the chunk size manipulation numbers:
+ */
+ /* Start off assuming it is a power of 2 */
+ dev->chunkShift = ShiftDiv(dev->nDataBytesPerChunk);
+ dev->chunkMask = (1<<dev->chunkShift) - 1;
+
+ if(dev->nDataBytesPerChunk == (dev->chunkMask + 1)){
+ /* Yes it is a power of 2, disable crumbs */
+ dev->crumbMask = 0;
+ dev->crumbShift = 0;
+ dev->crumbsPerChunk = 0;
+ } else {
+ /* Not a power of 2, use crumbs instead */
+ dev->crumbShift = ShiftDiv(sizeof(yaffs_PackedTags2TagsPart));
+ dev->crumbMask = (1<<dev->crumbShift)-1;
+ dev->crumbsPerChunk = dev->nDataBytesPerChunk/(1 << dev->crumbShift);
+ dev->chunkShift = 0;
+ dev->chunkMask = 0;
+ }
+
+
+ /*
* Calculate chunkGroupBits.
* We need to find the next power of 2 > than internalEndBlock
*/
x = dev->nChunksPerBlock * (dev->internalEndBlock + 1);
-
- for (bits = extraBits = 0; x > 1; bits++) {
- if (x & 1)
- extraBits++;
- x >>= 1;
- }
-
- if (extraBits > 0)
- bits++;
+
+ bits = ShiftsGE(x);
/* Set up tnode width if wide tnodes are enabled. */
if(!dev->wideTnodesDisabled){
dev->nErasureFailures = 0;
dev->nErasedBlocks = 0;
dev->isDoingGC = 0;
+ dev->hasPendingPrioritisedGCs = 1; /* Assume the worst for now, will get fixed on first GC */
/* Initialise temporary buffers and caches. */
{
for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
dev->tempBuffer[i].line = 0; /* not in use */
dev->tempBuffer[i].buffer =
- YMALLOC_DMA(dev->nBytesPerChunk);
+ YMALLOC_DMA(dev->nDataBytesPerChunk);
}
}
dev->srCache[i].object = NULL;
dev->srCache[i].lastUse = 0;
dev->srCache[i].dirty = 0;
- dev->srCache[i].data = YMALLOC_DMA(dev->nBytesPerChunk);
+ dev->srCache[i].data = YMALLOC_DMA(dev->nDataBytesPerChunk);
}
dev->srLastUse = 0;
}