/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/mtd/nand_bch.h>
#include <linux/platform_data/elm.h>

#include <linux/platform_data/mtd-nand-omap2.h>

#define	DRIVER_NAME	"omap2-nand"
#define	OMAP_NAND_TIMEOUT_MS	5000

#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

#define TF(value)	(value ? 1 : 0)
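
/*
 * The NAND_Ecc_P* flags above and the P*() helpers below repack individual
 * parity bits of the 32-bit GPMC ECC1 result into the three Hamming ECC
 * bytes stored on flash; see gen_true_ecc() further down.
 */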

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#define	PREFETCH_CONFIG1_CS_SHIFT	24
#define	ECC_CONFIG_CS_SHIFT		1
#define	CS_MASK				0x7
#define	ENABLE_PREFETCH			(0x1 << 7)
#define	DMA_MPU_MODE_SHIFT		2
#define	ECCSIZE0_SHIFT			12
#define	ECCSIZE1_SHIFT			22
#define	ECC1RESULTSIZE			0x1
#define	ECCCLEAR			0x100
#define	ECC1				0x1
#define	PREFETCH_FIFOTHRESHOLD_MAX	0x40
#define	PREFETCH_FIFOTHRESHOLD(val)	((val) << 8)
#define	PREFETCH_STATUS_COUNT(val)	(val & 0x00003fff)
#define	PREFETCH_STATUS_FIFO_CNT(val)	((val >> 24) & 0x7F)
#define	STATUS_BUFF_EMPTY		0x00000001

#define OMAP24XX_DMA_GPMC		4

#define SECTOR_BYTES		512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD		4

/* GPMC ecc engine settings for read */
#define BCH_WRAPMODE_1		1	/* BCH wrap mode 1 */
#define BCH8R_ECC_SIZE0		0x1a	/* ecc_size0 = 26 */
#define BCH8R_ECC_SIZE1		0x2	/* ecc_size1 = 2 */
#define BCH4R_ECC_SIZE0		0xd	/* ecc_size0 = 13 */
#define BCH4R_ECC_SIZE1		0x3	/* ecc_size1 = 3 */

/* GPMC ecc engine settings for write */
#define BCH_WRAPMODE_6		6	/* BCH wrap mode 6 */
#define BCH_ECC_SIZE0		0x0	/* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1		0x20	/* ecc_size1 = 32 */

#define BADBLOCK_MARKER_LENGTH		2

static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
				0x07, 0x0e};
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
			       0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};

/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;

struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;
	unsigned long			phys_base;
	enum omap_ecc			ecc_opt;
	struct completion		comp;
	struct dma_chan			*dma;
	int				gpmc_irq_fifo;
	int				gpmc_irq_count;
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char				*buf;
	int				buf_len;
	struct gpmc_nand_regs		reg;
	/* fields specific for BCHx_HW ECC scheme */
	struct device			*elm_dev;
	struct device_node		*of_node;
};

/**
 * omap_prefetch_enable - configures and starts prefetch transfer
 * @cs: cs (chip select) number
 * @fifo_th: fifo threshold to be used for read/write
 * @dma_mode: dma mode enable (1) or disable (0)
 * @u32_count: number of bytes to be transferred
 * @is_write: prefetch read(0) or write post(1) mode
 * @info: NAND device structure
 */
static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
	unsigned int u32_count, int is_write, struct omap_nand_info *info)
{
	u32 val;

	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
		return -1;

	if (readl(info->reg.gpmc_prefetch_control))
		return -EBUSY;

	/* Set the amount of bytes to be prefetched */
	writel(u32_count, info->reg.gpmc_prefetch_config2);

	/* Set dma/mpu mode, the prefetch read / post write and
	 * enable the engine. Select the chip select that requested
	 * the transfer.
	 */
	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
		(dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
	writel(val, info->reg.gpmc_prefetch_config1);

	/* Start the prefetch engine */
	writel(0x1, info->reg.gpmc_prefetch_control);

	return 0;
}

/**
 * omap_prefetch_reset - disables and stops the prefetch engine
 */
static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
{
	u32 config1;

	/* check if the same module/cs is trying to reset */
	config1 = readl(info->reg.gpmc_prefetch_config1);
	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
		return -EINVAL;

	/* Stop the PFPW engine */
	writel(0x0, info->reg.gpmc_prefetch_control);

	/* Reset/disable the PFPW engine */
	writel(0x0, info->reg.gpmc_prefetch_config1);

	return 0;
}
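
/*
 * Note: the typical prefetch/write-post sequence, as used by
 * omap_read_buf_pref() and omap_write_buf_pref() below, is
 *	omap_prefetch_enable(cs, fifo_th, dma_mode, count, is_write, info);
 *	... drain or fill the FIFO while watching gpmc_prefetch_status ...
 *	omap_prefetch_reset(cs, info);
 */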

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writeb(cmd, info->reg.gpmc_nand_command);

		else if (ctrl & NAND_ALE)
			writeb(cmd, info->reg.gpmc_nand_address);

		else /* NAND_NCE */
			writeb(cmd, info->reg.gpmc_nand_data);
	}
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;
	u32 status = 0;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = readl(info->reg.gpmc_status) &
					STATUS_BUFF_EMPTY;
		} while (!status);
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;
	u32 status = 0;
	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);
		/* wait until buffer is available for write */
		do {
			status = readl(info->reg.gpmc_status) &
					STATUS_BUFF_EMPTY;
		} while (!status);
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;
	u32 val;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.IO_ADDR_W);
		}
		/* wait for data to be flushed out before resetting prefetch */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		do {
			cpu_relax();
			val = readl(info->reg.gpmc_prefetch_status);
			val = PREFETCH_STATUS_COUNT(val);
		} while (val && (tim++ < limit));

		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}

/*
 * omap_nand_dma_callback: callback on the completion of dma transfer
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_callback(void *data)
{
	complete((struct completion *) data);
}

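/*
 * The DMA path below is only taken for transfers larger than the OOB area
 * (see omap_read_buf_dma_pref() and omap_write_buf_dma_pref()); if a
 * vmalloc'd buffer crosses a page boundary, the mapping fails or the
 * prefetch engine is busy, it falls back to the CPU-copy helpers above.
 */
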
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	struct scatterlist sg;
	unsigned long tim, limit;
	unsigned n;
	int ret;
	u32 val;

	if (addr >= high_memory) {
		struct page *p1;

		/* vmalloc'd buffer: DMA only if it fits within one page */
		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	init_completion(&info->comp);
	dma_async_issue_pending(info->dma);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));

	do {
		cpu_relax();
		val = readl(info->reg.gpmc_prefetch_status);
		val = PREFETCH_STATUS_COUNT(val);
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);

	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
}

/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;

	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		if (this_irq == info->gpmc_irq_count)
			goto done;

		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.IO_ADDR_W,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		ioread32_rep(info->nand.IO_ADDR_R,
						(u32 *)info->buf, bytes >> 2);
		info->buf = info->buf + bytes;

		if (this_irq == info->gpmc_irq_count)
			goto done;
	}

	return IRQ_HANDLED;

done:
	complete(&info->comp);

	disable_irq_nosync(info->gpmc_irq_fifo);
	disable_irq_nosync(info->gpmc_irq_count);

	return IRQ_HANDLED;
}

/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;

	if (len <= mtd->oobsize) {
		omap_read_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}

/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_irq_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	int ret = 0;
	unsigned long tim, limit;
	u32 val;

	if (len <= mtd->oobsize) {
		omap_write_buf_pref(mtd, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = omap_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for write to complete */
	wait_for_completion(&info->comp);

	/* wait for data to be flushed out before resetting the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		val = readl(info->reg.gpmc_prefetch_status);
		val = PREFETCH_STATUS_COUNT(val);
		cpu_relax();
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}

/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

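/*
 * omap_compare_ecc() below XORs the stored and calculated ECC into a 24-bit
 * syndrome; the number of set syndrome bits decides the outcome: 0 means the
 * codes match, 12 means a single correctable bit error (its byte and bit
 * offsets come from the even-numbered syndrome bits), 1 or 11 means an
 * uncorrectable error, and any other count is reported as uncorrectable
 * unless the stored ECC was all 0xFF with a zero calculated ECC (erased page).
 */
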
/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected in the buffer.
 * If there is no error, %0 is returned. If there is an error but it
 * was corrected, %1 is returned. Otherwise, %-1 is returned.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint i;
	u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8 ecc_bit[24];
	u8 ecc_sum = 0;
	u8 find_bit = 0;
	uint find_byte = 0;
	int isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i] = *ecc_data1 % 2;
		*ecc_data1 = *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i] = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i] = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i] = *ecc_data2 % 2;
		*ecc_data2 = *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i] = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i] = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		pr_debug("ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		pr_debug("ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		pr_debug("Correcting single bit ECC error at offset: "
				"%d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 1;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		pr_debug("UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from nand spare area with ECC registers values
 * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
 * detection and correction. If there are no errors, %0 is returned. If
 * there were errors and all of the errors were corrected, the number of
 * corrected errors is returned. If uncorrectable errors exist, %-1 is
 * returned.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;
	int stat = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
			/* keep track of the number of corrected errors */
			stat += ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return stat;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u32 val;

	val = readl(info->reg.gpmc_ecc_config);
	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
		return -EINVAL;

	/* read ecc result */
	val = readl(info->reg.gpmc_ecc1_result);
	*ecc_code++ = val;		/* P128e, ..., P1e */
	*ecc_code++ = val >> 16;	/* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	return 0;
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	u32 val;

	/* clear ecc and enable bits */
	val = ECCCLEAR | ECC1;
	writel(val, info->reg.gpmc_ecc_control);

	/* program ecc and result sizes */
	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
			ECC1RESULTSIZE);
	writel(val, info->reg.gpmc_ecc_size_config);

	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
		break;
	case NAND_ECC_READSYN:
		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
		break;
	default:
		dev_info(&info->pdev->dev,
			"error: unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
	writel(val, info->reg.gpmc_ecc_config);
}

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * This wait function is called during program and erase operations; given
 * the way it is called from the MTD layer, we should wait until the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status, state = this->state;

	if (state == FL_ERASING)
		timeo += msecs_to_jiffies(400);
	else
		timeo += msecs_to_jiffies(20);

	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
	while (time_before(jiffies, timeo)) {
		status = readb(info->reg.gpmc_nand_data);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}

	status = readb(info->reg.gpmc_nand_data);
	return status;
}

/**
 * omap_dev_ready - checks the NAND ready/busy state via the GPMC status
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	unsigned int val = 0;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);

	val = readl(info->reg.gpmc_status);

	if ((val & 0x100) == 0x100) {
		return 1;
	} else {
		return 0;
	}
}

/**
 * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 *
 * When using BCH, sector size is hardcoded to 512 bytes.
 * Wrap mode 6 is used for both reading and writing when the ELM module
 * is not used for error correction.
 * On writing:
 * eccsize0 = 0  (no additional protected byte in spare area)
 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
 */
static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
	unsigned int bch_type;
	unsigned int dev_width, nsectors;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	enum omap_ecc ecc_opt = info->ecc_opt;
	struct nand_chip *chip = mtd->priv;
	u32 val, wr_mode;
	unsigned int ecc_size1, ecc_size0;

	/* GPMC configurations for calculating ECC */
	switch (ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		bch_type = 0;
		nsectors = 1;
		if (mode == NAND_ECC_READ) {
			wr_mode	  = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		} else {
			wr_mode   = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH4_CODE_HW:
		bch_type = 0;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode	  = BCH_WRAPMODE_1;
			ecc_size0 = BCH4R_ECC_SIZE0;
			ecc_size1 = BCH4R_ECC_SIZE1;
		} else {
			wr_mode   = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		bch_type = 1;
		nsectors = 1;
		if (mode == NAND_ECC_READ) {
			wr_mode	  = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		} else {
			wr_mode   = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		bch_type = 1;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode	  = BCH_WRAPMODE_1;
			ecc_size0 = BCH8R_ECC_SIZE0;
			ecc_size1 = BCH8R_ECC_SIZE1;
		} else {
			wr_mode   = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		bch_type = 0x2;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode	  = 0x01;
			ecc_size0 = 52;	/* ECC bits in nibbles per sector */
			ecc_size1 = 0;	/* non-ECC bits in nibbles per sector */
		} else {
			wr_mode	  = 0x01;
			ecc_size0 = 0;	/* extra bits in nibbles per sector */
			ecc_size1 = 52;	/* OOB bits in nibbles per sector */
		}
		break;
	default:
		return;
	}

	writel(ECC1, info->reg.gpmc_ecc_control);

	/* Configure ecc size for BCH */
	val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
	writel(val, info->reg.gpmc_ecc_size_config);

	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	/* BCH configuration */
	val = ((1			<< 16) |	/* enable BCH */
	       (bch_type		<< 12) |	/* BCH4/BCH8/BCH16 */
	       (wr_mode			<<  8) |	/* wrap mode */
	       (dev_width		<<  7) |	/* bus width */
	       (((nsectors - 1) & 0x7)	<<  4) |	/* number of sectors */
	       (info->gpmc_cs		<<  1) |	/* ECC CS */
	       (0x1));					/* enable ECC */

	writel(val, info->reg.gpmc_ecc_config);

	/* Clear ecc and enable bits */
	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}

static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
			       0x97, 0x79, 0xe5, 0x24, 0xb5};

/**
 * omap_calculate_ecc_bch - Generate BCH ECC bytes
 * @mtd:	MTD device structure
 * @dat:	The pointer to data on which ecc is computed
 * @ecc_calc:	Buffer storing the calculated ECC bytes
 *
 * Support calculation of BCH4/8/16 ECC vectors for one page
 */
static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
					const u_char *dat, u_char *ecc_calc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	int eccbytes = info->nand.ecc.bytes;
	struct gpmc_nand_regs *gpmc_regs = &info->reg;
	u8 *ecc_code;
	unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
	u32 val;
	int i, j;

	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
	for (i = 0; i < nsectors; i++) {
		ecc_code = ecc_calc;
		switch (info->ecc_opt) {
		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		case OMAP_ECC_BCH8_CODE_HW:
			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
			bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
			bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
			*ecc_code++ = (bch_val4 & 0xFF);
			*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
			*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
			*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
			*ecc_code++ = (bch_val3 & 0xFF);
			*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
			*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
			*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
			*ecc_code++ = (bch_val2 & 0xFF);
			*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
			*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
			*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
			*ecc_code++ = (bch_val1 & 0xFF);
			break;
		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		case OMAP_ECC_BCH4_CODE_HW:
			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
			*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
			*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
			*ecc_code++ = ((bch_val2 & 0xF) << 4) |
				((bch_val1 >> 28) & 0xF);
			*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
			*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
			*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
			*ecc_code++ = ((bch_val1 & 0xF) << 4);
			break;
		case OMAP_ECC_BCH16_CODE_HW:
			val = readl(gpmc_regs->gpmc_bch_result6[i]);
			ecc_code[0]  = ((val >>  8) & 0xFF);
			ecc_code[1]  = ((val >>  0) & 0xFF);
			val = readl(gpmc_regs->gpmc_bch_result5[i]);
			ecc_code[2]  = ((val >> 24) & 0xFF);
			ecc_code[3]  = ((val >> 16) & 0xFF);
			ecc_code[4]  = ((val >>  8) & 0xFF);
			ecc_code[5]  = ((val >>  0) & 0xFF);
			val = readl(gpmc_regs->gpmc_bch_result4[i]);
			ecc_code[6]  = ((val >> 24) & 0xFF);
			ecc_code[7]  = ((val >> 16) & 0xFF);
			ecc_code[8]  = ((val >>  8) & 0xFF);
			ecc_code[9]  = ((val >>  0) & 0xFF);
			val = readl(gpmc_regs->gpmc_bch_result3[i]);
			ecc_code[10] = ((val >> 24) & 0xFF);
			ecc_code[11] = ((val >> 16) & 0xFF);
			ecc_code[12] = ((val >>  8) & 0xFF);
			ecc_code[13] = ((val >>  0) & 0xFF);
			val = readl(gpmc_regs->gpmc_bch_result2[i]);
			ecc_code[14] = ((val >> 24) & 0xFF);
			ecc_code[15] = ((val >> 16) & 0xFF);
			ecc_code[16] = ((val >>  8) & 0xFF);
			ecc_code[17] = ((val >>  0) & 0xFF);
			val = readl(gpmc_regs->gpmc_bch_result1[i]);
			ecc_code[18] = ((val >> 24) & 0xFF);
			ecc_code[19] = ((val >> 16) & 0xFF);
			ecc_code[20] = ((val >>  8) & 0xFF);
			ecc_code[21] = ((val >>  0) & 0xFF);
			val = readl(gpmc_regs->gpmc_bch_result0[i]);
			ecc_code[22] = ((val >> 24) & 0xFF);
			ecc_code[23] = ((val >> 16) & 0xFF);
			ecc_code[24] = ((val >>  8) & 0xFF);
			ecc_code[25] = ((val >>  0) & 0xFF);
			break;
		default:
			return -EINVAL;
		}

		/* ECC scheme specific syndrome customizations */
		switch (info->ecc_opt) {
		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
			/* Add constant polynomial to remainder, so that
			 * ECC of blank pages results in 0x0 on reading back */
			for (j = 0; j < eccbytes; j++)
				ecc_calc[j] ^= bch4_polynomial[j];
			break;
		case OMAP_ECC_BCH4_CODE_HW:
			/* Set 8th ECC byte as 0x0 for ROM compatibility */
			ecc_calc[eccbytes - 1] = 0x0;
			break;
		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
			/* Add constant polynomial to remainder, so that
			 * ECC of blank pages results in 0x0 on reading back */
			for (j = 0; j < eccbytes; j++)
				ecc_calc[j] ^= bch8_polynomial[j];
			break;
		case OMAP_ECC_BCH8_CODE_HW:
			/* Set 14th ECC byte as 0x0 for ROM compatibility */
			ecc_calc[eccbytes - 1] = 0x0;
			break;
		case OMAP_ECC_BCH16_CODE_HW:
			break;
		default:
			return -EINVAL;
		}

		ecc_calc += eccbytes;
	}

	return 0;
}

/**
 * erased_sector_bitflips - count bit flips
 * @data:	data sector buffer
 * @oob:	oob buffer
 * @info:	omap_nand_info
 *
 * Check whether the bit flips in an erased page fall below the correctable
 * level. If they do, report the page as erased with correctable bit flips;
 * otherwise report it as an uncorrectable page.
 */
static int erased_sector_bitflips(u_char *data, u_char *oob,
		struct omap_nand_info *info)
{
	int flip_bits = 0, i;

	for (i = 0; i < info->nand.ecc.size; i++) {
		flip_bits += hweight8(~data[i]);
		if (flip_bits > info->nand.ecc.strength)
			return 0;
	}

	for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
		flip_bits += hweight8(~oob[i]);
		if (flip_bits > info->nand.ecc.strength)
			return 0;
	}

	/*
	 * Bit flips fall within the correctable level.
	 * Fill the data area with 0xFF.
	 */
	if (flip_bits) {
		memset(data, 0xFF, info->nand.ecc.size);
		memset(oob, 0xFF, info->nand.ecc.bytes);
	}

	return flip_bits;
}

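/*
 * Per-sector decision flow in omap_elm_correct_data() below: an all-zero
 * calculated ECC means a clean sector; a calculated ECC matching the
 * corresponding bchN_vector means a fully erased sector; a sector with only
 * a few 0-bits is treated as erased with correctable bit-flips (counted by
 * erased_sector_bitflips() above); everything else is handed to the ELM
 * for BCH decoding.
 */
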
/**
 * omap_elm_correct_data - corrects page data area in case error reported
 * @mtd:	MTD device structure
 * @data:	page data
 * @read_ecc:	ecc read from nand flash
 * @calc_ecc:	ecc read from HW ECC registers
 *
 * Calculated ecc vector reported as zero in case of non-error pages.
 * In case of non-zero ecc vector, first filter out erased-pages, and
 * then process data via ELM to detect bit-flips.
 */
static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
			mtd);
	struct nand_ecc_ctrl *ecc = &info->nand.ecc;
	int eccsteps = info->nand.ecc.steps;
	int i, j, stat = 0;
	int eccflag, actual_eccbytes;
	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
	u_char *ecc_vec = calc_ecc;
	u_char *spare_ecc = read_ecc;
	u_char *erased_ecc_vec;
	u_char *buf;
	int bitflip_count;
	bool is_error_reported = false;
	u32 bit_pos, byte_pos, error_max, pos;
	int err;

	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW:
		/* omit 7th ECC byte reserved for ROM code compatibility */
		actual_eccbytes = ecc->bytes - 1;
		erased_ecc_vec = bch4_vector;
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		/* omit 14th ECC byte reserved for ROM code compatibility */
		actual_eccbytes = ecc->bytes - 1;
		erased_ecc_vec = bch8_vector;
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		actual_eccbytes = ecc->bytes;
		erased_ecc_vec = bch16_vector;
		break;
	default:
		dev_err(&info->pdev->dev, "invalid driver configuration\n");
		return -EINVAL;
	}

	/* Initialize elm error vector to zero */
	memset(err_vec, 0, sizeof(err_vec));

	for (i = 0; i < eccsteps; i++) {
		eccflag = 0;	/* initialize eccflag */

		/*
		 * Check any error reported,
		 * In case of error, non zero ecc reported.
		 */
		for (j = 0; j < actual_eccbytes; j++) {
			if (calc_ecc[j] != 0) {
				eccflag = 1; /* non zero ecc, error present */
				break;
			}
		}

		if (eccflag == 1) {
			if (memcmp(calc_ecc, erased_ecc_vec,
						actual_eccbytes) == 0) {
				/*
				 * calc_ecc[] matches pattern for ECC(all 0xff)
				 * so this is definitely an erased-page
				 */
			} else {
				buf = &data[info->nand.ecc.size * i];
				/*
				 * count number of 0-bits in read_buf.
				 * This check can be removed once a similar
				 * check is introduced in generic NAND driver
				 */
				bitflip_count = erased_sector_bitflips(
						buf, read_ecc, info);
				if (bitflip_count) {
					/*
					 * number of 0-bits within ECC limits
					 * So this may be an erased-page
					 */
					stat += bitflip_count;
				} else {
					/*
					 * Too many 0-bits. It may be a
					 * - programmed-page, OR
					 * - erased-page with many bit-flips
					 * So this page requires check by ELM
					 */
					err_vec[i].error_reported = true;
					is_error_reported = true;
				}
			}
		}

		/* Update the ecc vector */
		calc_ecc += ecc->bytes;
		read_ecc += ecc->bytes;
	}

	/* Check if any error reported */
	if (!is_error_reported)
		return stat;

	/* Decode BCH error using ELM module */
	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);

	err = 0;
	for (i = 0; i < eccsteps; i++) {
		if (err_vec[i].error_uncorrectable) {
			dev_err(&info->pdev->dev,
				"uncorrectable bit-flips found\n");
			err = -EBADMSG;
		} else if (err_vec[i].error_reported) {
			for (j = 0; j < err_vec[i].error_count; j++) {
				switch (info->ecc_opt) {
				case OMAP_ECC_BCH4_CODE_HW:
					/* Add 4 bits to take care of padding */
					pos = err_vec[i].error_loc[j] +
						BCH4_BIT_PAD;
					break;
				case OMAP_ECC_BCH8_CODE_HW:
				case OMAP_ECC_BCH16_CODE_HW:
					pos = err_vec[i].error_loc[j];
					break;
				default:
					return -EINVAL;
				}
				error_max = (ecc->size + actual_eccbytes) * 8;
				/* Calculate bit position of error */
				bit_pos = pos % 8;

				/* Calculate byte position of error */
				byte_pos = (error_max - pos - 1) / 8;

				if (pos < error_max) {
					if (byte_pos < 512) {
						pr_debug("bitflip@dat[%d]=%x\n",
						     byte_pos, data[byte_pos]);
						data[byte_pos] ^= 1 << bit_pos;
					} else {
						pr_debug("bitflip@oob[%d]=%x\n",
							(byte_pos - 512),
						     spare_ecc[byte_pos - 512]);
						spare_ecc[byte_pos - 512] ^=
							1 << bit_pos;
					}
				} else {
					dev_err(&info->pdev->dev,
						"invalid bit-flip @ %d:%d\n",
						byte_pos, bit_pos);
					err = -EBADMSG;
				}
			}
		}

		/* Update number of correctable errors */
		stat += err_vec[i].error_count;

		/* Update page data with sector size */
		data += ecc->size;
		spare_ecc += ecc->bytes;
	}

	return (err) ? err : stat;
}

/**
 * omap_write_page_bch - BCH ecc based write page function for entire page
 * @mtd:		mtd info structure
 * @chip:		nand chip info structure
 * @buf:		data buffer
 * @oob_required:	must write chip->oob_poi to OOB
 *
 * Custom write page method evolved to support multi sector writing in one shot
 */
static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
				  const uint8_t *buf, int oob_required)
{
	int i;
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint32_t *eccpos = chip->ecc.layout->eccpos;

	/* Enable GPMC ecc engine */
	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);

	/* Write data */
	chip->write_buf(mtd, buf, mtd->writesize);

	/* Update ecc vector from GPMC result registers */
	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);

	for (i = 0; i < chip->ecc.total; i++)
		chip->oob_poi[eccpos[i]] = ecc_calc[i];

	/* Write ecc vector to OOB area */
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

/**
 * omap_read_page_bch - BCH ecc based page read function for entire page
 * @mtd:		mtd info structure
 * @chip:		nand chip info structure
 * @buf:		buffer to store read data
 * @oob_required:	caller requires OOB data read to chip->oob_poi
 * @page:		page number to read
 *
 * For BCH ecc scheme, GPMC used for syndrome calculation and ELM module
 * used for error correction.
 * Custom method evolved to support ELM error correction & multi sector
 * reading. On reading page data area is read along with OOB data with
 * ecc engine enabled. ecc vector updated after read of OOB data.
 * For non error pages ecc vector reported as zero.
 */
static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	uint8_t *ecc_calc = chip->buffers->ecccalc;
	uint8_t *ecc_code = chip->buffers->ecccode;
	uint32_t *eccpos = chip->ecc.layout->eccpos;
	uint8_t *oob = &chip->oob_poi[eccpos[0]];
	uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
	int stat;
	unsigned int max_bitflips = 0;

	/* Enable GPMC ecc engine */
	chip->ecc.hwctl(mtd, NAND_ECC_READ);

	/* Read data */
	chip->read_buf(mtd, buf, mtd->writesize);

	/* Read oob bytes */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
	chip->read_buf(mtd, oob, chip->ecc.total);

	/* Calculate ecc bytes */
	chip->ecc.calculate(mtd, buf, ecc_calc);

	memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);

	stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);

	if (stat < 0) {
		mtd->ecc_stats.failed++;
	} else {
		mtd->ecc_stats.corrected += stat;
		max_bitflips = max_t(unsigned int, max_bitflips, stat);
	}

	return max_bitflips;
}

/**
 * is_elm_present - checks for presence of ELM module by scanning DT nodes
 * @info:	NAND device structure containing platform data
 * @elm_node:	ELM device tree node handle
 */
static bool is_elm_present(struct omap_nand_info *info,
			   struct device_node *elm_node)
{
	struct platform_device *pdev;

	/* check whether elm-id is passed via DT */
	if (!elm_node) {
		dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
		return false;
	}
	pdev = of_find_device_by_node(elm_node);
	/* check whether ELM device is registered */
	if (!pdev) {
		dev_err(&info->pdev->dev, "ELM device not found\n");
		return false;
	}
	/* ELM module available, now configure it */
	info->elm_dev = &pdev->dev;
	return true;
}

static bool omap2_nand_ecc_check(struct omap_nand_info *info,
				 struct omap_nand_platform_data *pdata)
{
	bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;

	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		ecc_needs_omap_bch = false;
		ecc_needs_bch = true;
		ecc_needs_elm = false;
		break;
	case OMAP_ECC_BCH4_CODE_HW:
	case OMAP_ECC_BCH8_CODE_HW:
	case OMAP_ECC_BCH16_CODE_HW:
		ecc_needs_omap_bch = true;
		ecc_needs_bch = false;
		ecc_needs_elm = true;
		break;
	default:
		ecc_needs_omap_bch = false;
		ecc_needs_bch = false;
		ecc_needs_elm = false;
		break;
	}

	if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_BCH)) {
		dev_err(&info->pdev->dev,
			"CONFIG_MTD_NAND_ECC_BCH not enabled\n");
		return false;
	}
	if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
		dev_err(&info->pdev->dev,
			"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
		return false;
	}
	if (ecc_needs_elm && !is_elm_present(info, pdata->elm_of_node)) {
		dev_err(&info->pdev->dev, "ELM not available\n");
		return false;
	}

	return true;
}

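/*
 * omap_nand_probe() below does, in order: fetch platform data, ioremap the
 * GPMC-NAND data/command/address window, nand_scan_ident(), select the
 * low-level read_buf/write_buf path (polled, prefetch-polled, DMA or IRQ),
 * configure the requested ECC scheme and its OOB layout, nand_scan_tail(),
 * and finally register the MTD partitions.
 */
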
static int omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info		*info;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			*mtd;
	struct nand_chip		*nand_chip;
	struct nand_ecclayout		*ecclayout;
	int				err;
	int				i;
	dma_cap_mask_t			mask;
	unsigned			sig;
	unsigned			oob_index;
	struct resource			*res;
	struct mtd_part_parser_data	ppdata = {};

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
				GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev		= pdev;
	info->gpmc_cs		= pdata->cs;
	info->reg		= pdata->reg;
	info->of_node		= pdata->of_node;
	info->ecc_opt		= pdata->ecc_opt;
	mtd			= &info->mtd;
	mtd->priv		= &info->nand;
	mtd->name		= dev_name(&pdev->dev);
	mtd->owner		= THIS_MODULE;
	nand_chip		= &info->nand;
	nand_chip->ecc.priv	= NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(nand_chip->IO_ADDR_R))
		return PTR_ERR(nand_chip->IO_ADDR_R);

	info->phys_base = res->start;

	nand_chip->controller = &info->controller;

	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
	nand_chip->cmd_ctrl  = omap_hwcontrol;

	/*
	 * If RDY/BSY line is connected to OMAP then use the omap ready
	 * function and the generic nand_wait function which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay which is slightly more than tR (AC Timing) of the NAND
	 * device and read status register until you get a failure or success
	 */
	if (pdata->dev_ready) {
		nand_chip->dev_ready = omap_dev_ready;
		nand_chip->chip_delay = 0;
	} else {
		nand_chip->waitfunc = omap_wait;
		nand_chip->chip_delay = 50;
	}

	if (pdata->flash_bbt)
		nand_chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	else
		nand_chip->options |= NAND_SKIP_BBTSCAN;

	/* scan NAND device connected to chip controller */
	nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
	if (nand_scan_ident(mtd, 1, NULL)) {
		dev_err(&info->pdev->dev, "scan failed, may be bus-width mismatch\n");
		err = -ENXIO;
		goto return_error;
	}

	/* check for small page devices */
	if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) {
		dev_err(&info->pdev->dev, "small page devices are not supported\n");
		err = -EINVAL;
		goto return_error;
	}

	/* re-populate low-level callbacks based on xfer modes */
	switch (pdata->xfer_type) {
	case NAND_OMAP_PREFETCH_POLLED:
		nand_chip->read_buf  = omap_read_buf_pref;
		nand_chip->write_buf = omap_write_buf_pref;
		break;

	case NAND_OMAP_POLLED:
		/* Use nand_base defaults for {read,write}_buf */
		break;

	case NAND_OMAP_PREFETCH_DMA:
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		sig = OMAP24XX_DMA_GPMC;
		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
		if (!info->dma) {
			dev_err(&pdev->dev, "DMA engine request failed\n");
			err = -ENXIO;
			goto return_error;
		} else {
			struct dma_slave_config cfg;

			memset(&cfg, 0, sizeof(cfg));
			cfg.src_addr = info->phys_base;
			cfg.dst_addr = info->phys_base;
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.src_maxburst = 16;
			cfg.dst_maxburst = 16;
			err = dmaengine_slave_config(info->dma, &cfg);
			if (err) {
				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
					err);
				goto return_error;
			}
			nand_chip->read_buf  = omap_read_buf_dma_pref;
			nand_chip->write_buf = omap_write_buf_dma_pref;
		}
		break;

	case NAND_OMAP_PREFETCH_IRQ:
		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
		if (info->gpmc_irq_fifo <= 0) {
			dev_err(&pdev->dev, "error getting fifo irq\n");
			err = -ENODEV;
			goto return_error;
		}
		err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
					omap_nand_irq, IRQF_SHARED,
					"gpmc-nand-fifo", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
						info->gpmc_irq_fifo, err);
			info->gpmc_irq_fifo = 0;
			goto return_error;
		}

		info->gpmc_irq_count = platform_get_irq(pdev, 1);
		if (info->gpmc_irq_count <= 0) {
			dev_err(&pdev->dev, "error getting count irq\n");
			err = -ENODEV;
			goto return_error;
		}
		err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
					omap_nand_irq, IRQF_SHARED,
					"gpmc-nand-count", info);
		if (err) {
			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
						info->gpmc_irq_count, err);
			info->gpmc_irq_count = 0;
			goto return_error;
		}

		nand_chip->read_buf  = omap_read_buf_irq_pref;
		nand_chip->write_buf = omap_write_buf_irq_pref;

		break;

	default:
		dev_err(&pdev->dev,
			"xfer_type(%d) not supported!\n", pdata->xfer_type);
		err = -EINVAL;
		goto return_error;
	}

	if (!omap2_nand_ecc_check(info, pdata)) {
		err = -EINVAL;
		goto return_error;
	}

	/* populate MTD interface based on ECC scheme */
	ecclayout = &omap_oobinfo;
	switch (info->ecc_opt) {
	case OMAP_ECC_HAM1_CODE_SW:
		nand_chip->ecc.mode = NAND_ECC_SOFT;
		break;

	case OMAP_ECC_HAM1_CODE_HW:
		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
		nand_chip->ecc.mode		= NAND_ECC_HW;
		nand_chip->ecc.bytes		= 3;
		nand_chip->ecc.size		= 512;
		nand_chip->ecc.strength		= 1;
		nand_chip->ecc.calculate	= omap_calculate_ecc;
		nand_chip->ecc.hwctl		= omap_enable_hwecc;
		nand_chip->ecc.correct		= omap_correct_data;
		/* define ECC layout */
		ecclayout->eccbytes		= nand_chip->ecc.bytes *
							(mtd->writesize /
							nand_chip->ecc.size);
		if (nand_chip->options & NAND_BUSWIDTH_16)
			oob_index		= BADBLOCK_MARKER_LENGTH;
		else
			oob_index		= 1;
		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
			ecclayout->eccpos[i]	= oob_index;
		/* no reserved-marker in ecclayout for this ecc-scheme */
		ecclayout->oobfree->offset	=
				ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
		break;

	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
		nand_chip->ecc.mode		= NAND_ECC_HW;
		nand_chip->ecc.size		= 512;
		nand_chip->ecc.bytes		= 7;
		nand_chip->ecc.strength		= 4;
		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
		nand_chip->ecc.correct		= nand_bch_correct_data;
		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
		/* define ECC layout */
		ecclayout->eccbytes		= nand_chip->ecc.bytes *
							(mtd->writesize /
							nand_chip->ecc.size);
		oob_index			= BADBLOCK_MARKER_LENGTH;
		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
			ecclayout->eccpos[i] = oob_index;
			if (((i + 1) % nand_chip->ecc.bytes) == 0)
				oob_index++;
		}
		/* include reserved-marker in ecclayout->oobfree calculation */
		/* include reserved-marker in ecclayout->oobfree calculation */
		ecclayout->oobfree->offset = 1 +
			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
		/* software bch library is used for locating errors */
		nand_chip->ecc.priv = nand_bch_init(mtd,
						    nand_chip->ecc.size,
						    nand_chip->ecc.bytes,
						    &ecclayout);
		if (!nand_chip->ecc.priv) {
			dev_err(&info->pdev->dev, "unable to use BCH library\n");
			err = -EINVAL;
			goto return_error;
		}
		break;

	case OMAP_ECC_BCH4_CODE_HW:
		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		/* 14th bit is kept reserved for ROM-code compatibility */
		nand_chip->ecc.bytes = 7 + 1;
		nand_chip->ecc.strength = 4;
		nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
		nand_chip->ecc.correct = omap_elm_correct_data;
		nand_chip->ecc.calculate = omap_calculate_ecc_bch;
		nand_chip->ecc.read_page = omap_read_page_bch;
		nand_chip->ecc.write_page = omap_write_page_bch;
		/* define ECC layout */
		ecclayout->eccbytes = nand_chip->ecc.bytes *
				      (mtd->writesize /
				       nand_chip->ecc.size);
		oob_index = BADBLOCK_MARKER_LENGTH;
		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
			ecclayout->eccpos[i] = oob_index;
		/* reserved marker already included in ecclayout->eccbytes */
		ecclayout->oobfree->offset =
			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;

		err = elm_config(info->elm_dev, BCH4_ECC,
				 info->mtd.writesize / nand_chip->ecc.size,
				 nand_chip->ecc.size, nand_chip->ecc.bytes);
		if (err < 0)
			goto return_error;
		break;

	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		nand_chip->ecc.bytes = 13;
		nand_chip->ecc.strength = 8;
		nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
		nand_chip->ecc.correct = nand_bch_correct_data;
		nand_chip->ecc.calculate = omap_calculate_ecc_bch;
		/* define ECC layout */
		ecclayout->eccbytes = nand_chip->ecc.bytes *
				      (mtd->writesize /
				       nand_chip->ecc.size);
		oob_index = BADBLOCK_MARKER_LENGTH;
		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
			ecclayout->eccpos[i] = oob_index;
			if (((i + 1) % nand_chip->ecc.bytes) == 0)
				oob_index++;
		}
		/* include reserved-marker in ecclayout->oobfree calculation */
		ecclayout->oobfree->offset = 1 +
			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
		/* software bch library is used for locating errors */
		nand_chip->ecc.priv = nand_bch_init(mtd,
						    nand_chip->ecc.size,
						    nand_chip->ecc.bytes,
						    &ecclayout);
		if (!nand_chip->ecc.priv) {
			dev_err(&info->pdev->dev, "unable to use BCH library\n");
			err = -EINVAL;
			goto return_error;
		}
		break;

	case OMAP_ECC_BCH8_CODE_HW:
		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		/* 14th bit is kept reserved for ROM-code compatibility */
		nand_chip->ecc.bytes = 13 + 1;
		nand_chip->ecc.strength = 8;
		nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
		nand_chip->ecc.correct = omap_elm_correct_data;
		nand_chip->ecc.calculate = omap_calculate_ecc_bch;
		nand_chip->ecc.read_page = omap_read_page_bch;
		nand_chip->ecc.write_page = omap_write_page_bch;

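		/*
		 * Fully hardware BCH8: the GPMC produces the syndromes and
		 * the ELM locates the failing bits, so elm_config() is given
		 * the step count, step size and syndrome size for this page
		 * geometry. For example (illustrative figures), a 2 KiB page
		 * with a 64-byte OOB gives 4 steps of 14 ECC bytes: eccpos
		 * runs 2..57 and oobfree starts at byte 58.
		 */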
		err = elm_config(info->elm_dev, BCH8_ECC,
				 info->mtd.writesize / nand_chip->ecc.size,
				 nand_chip->ecc.size, nand_chip->ecc.bytes);
		if (err < 0)
			goto return_error;

		/* define ECC layout */
		ecclayout->eccbytes = nand_chip->ecc.bytes *
				      (mtd->writesize /
				       nand_chip->ecc.size);
		oob_index = BADBLOCK_MARKER_LENGTH;
		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
			ecclayout->eccpos[i] = oob_index;
		/* reserved marker already included in ecclayout->eccbytes */
		ecclayout->oobfree->offset =
			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
		break;

	case OMAP_ECC_BCH16_CODE_HW:
		pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
		nand_chip->ecc.mode = NAND_ECC_HW;
		nand_chip->ecc.size = 512;
		nand_chip->ecc.bytes = 26;
		nand_chip->ecc.strength = 16;
		nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
		nand_chip->ecc.correct = omap_elm_correct_data;
		nand_chip->ecc.calculate = omap_calculate_ecc_bch;
		nand_chip->ecc.read_page = omap_read_page_bch;
		nand_chip->ecc.write_page = omap_write_page_bch;

		err = elm_config(info->elm_dev, BCH16_ECC,
				 info->mtd.writesize / nand_chip->ecc.size,
				 nand_chip->ecc.size, nand_chip->ecc.bytes);
		if (err < 0)
			goto return_error;

		/* define ECC layout */
		ecclayout->eccbytes = nand_chip->ecc.bytes *
				      (mtd->writesize /
				       nand_chip->ecc.size);
		oob_index = BADBLOCK_MARKER_LENGTH;
		for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
			ecclayout->eccpos[i] = oob_index;
		/* reserved marker already included in ecclayout->eccbytes */
		ecclayout->oobfree->offset =
			ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
		break;
	default:
		dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
		err = -EINVAL;
		goto return_error;
	}

	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW)
		goto scan_tail;

	/* all OOB bytes from oobfree->offset till end of OOB are free */
	ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
	/* check if NAND device's OOB is enough to store ECC signatures */
	if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
		dev_err(&info->pdev->dev,
			"not enough OOB bytes required = %d, available=%d\n",
			ecclayout->eccbytes, mtd->oobsize);
		err = -EINVAL;
		goto return_error;
	}
	nand_chip->ecc.layout = ecclayout;

scan_tail:
	/* second phase scan */
	if (nand_scan_tail(mtd)) {
		err = -ENXIO;
		goto return_error;
	}

	ppdata.of_node = pdata->of_node;
	mtd_device_parse_register(mtd, NULL, &ppdata, pdata->parts,
				  pdata->nr_parts);

	platform_set_drvdata(pdev, mtd);

	return 0;

return_error:
	if (info->dma)
		dma_release_channel(info->dma);
	if (nand_chip->ecc.priv) {
		nand_bch_free(nand_chip->ecc.priv);
		nand_chip->ecc.priv = NULL;
	}
	return err;
}

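/*
 * Driver teardown: drop the software BCH context if one was allocated,
 * release the DMA channel, then unregister the MTD device via nand_release().
 */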
static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct nand_chip *nand_chip = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
						   mtd);
	if (nand_chip->ecc.priv) {
		nand_bch_free(nand_chip->ecc.priv);
		nand_chip->ecc.priv = NULL;
	}
	if (info->dma)
		dma_release_channel(info->dma);
	nand_release(mtd);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe = omap_nand_probe,
	.remove = omap_nand_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
};

module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");