cfi_cmdset_0001.c
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
			MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
			MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
			mtd->flags |= MTD_PROGRAM_REGIONS;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, MTD_PROGREGION_SIZE(mtd),
			       MTD_PROGREGION_CTRLMODE_VALID(mtd),
			       MTD_PROGREGION_CTRLMODE_INVALID(mtd));
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) +
				 numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
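As a quick sanity check on the partition arithmetic above, here is a small illustrative helper; it is not part of the driver and the numbers are made up. A 32 MiB chip has cfi->chipshift = 25, so with 4 identical hardware partitions partshift = 25 - __ffs(4) = 23, each virtual flchip covers 8 MiB, and partition j starts at j << 23 within the physical chip.

/* Illustrative only -- mirrors the partshift arithmetic above for a
 * hypothetical 32 MiB chip split into 4 hardware partitions. */
static unsigned long example_partition_size(void)
{
	int chipshift = 25;				/* 1 << 25 == 32 MiB       */
	int numparts  = 4;				/* identical hw partitions */
	int partshift = chipshift - __ffs(numparts);	/* 25 - 2 == 23            */

	return 1UL << partshift;			/* 8 MiB per virtual flchip */
}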
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
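For orientation, here is a minimal sketch of how the operation paths in this command set typically pair get_chip() with put_chip(). The helper name example_do_operation and the choice of FL_WRITING are illustrative only; the real callers are the write/erase/OTP routines elsewhere in this file.

/* Illustrative caller sketch, not a real driver entry point: claim the
 * chip, run the command sequence, then hand ownership back and let
 * put_chip() wake any waiters. */
static int example_do_operation(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* ... issue the actual program/erase command sequence here ... */

	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}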
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt what so ever can be serviced while the flash isn't in array
 * mode. This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
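These two helpers are meant to bracket every code path where the flash leaves array mode. As a rough illustration (the helper name and the elided command sequence below are made up, not taken from the driver), a caller looks like this:

/* Illustrative bracketing only: keep the non-array-mode window as short
 * as possible, since no interrupt can be serviced while XIP is disabled. */
static void __xipram example_xip_bracket(struct map_info *map,
					 struct flchip *chip,
					 unsigned long adr)
{
	xip_disable(map, chip, adr);	/* leave array mode, IRQs masked    */
	/* ... issue the flash command sequence here ... */
	xip_enable(map, chip, adr);	/* back to array mode, IRQs enabled */
}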
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);