/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

#define to_iommu(dev)							\
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

struct iotlb_lock {
	short base;
	short vict;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * omap_install_iommu_arch - Install architecture-specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);

/**
 * omap_uninstall_iommu_arch - Uninstall architecture-specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
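/*
 * Illustrative only (not code from this file): an architecture backend such
 * as the OMAP2/3 one is expected to register its ops once at init time,
 * roughly along the lines of
 *
 *	static const struct iommu_functions omap2_iommu_ops = { ... };
 *	...
 *	return omap_install_iommu_arch(&omap2_iommu_ops);
 *
 * and to call omap_uninstall_iommu_arch() again on its exit path.
 */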
/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (!arch_iommu)
		return -ENODEV;

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = arch_iommu->enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	arch_iommu->disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 * TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}
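/*
 * Worked example, for illustration only: a MMU_LOCK readback of 0x00001070
 * decodes as base = (0x1070 & MMU_LOCK_BASE_MASK) >> 10 = 4 and
 * vict = (0x1070 & MMU_LOCK_VICT_MASK) >> 4 = 7, i.e. TLB entries 0..3 are
 * treated as locked/preserved and entry 7 is the current victim that the
 * next TLB load or flush-entry operation acts on.
 */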
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:	contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:	an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else	/* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif	/* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}
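/*
 * Whether callers such as omap_iopgtable_store_entry() below actually
 * preload the TLB depends on the PREFETCH_IOTLB compile-time switch: when it
 * is not defined, the stub above is built and prefetch_iotlb_entry() is a
 * no-op, so storing an entry only updates the page table and translations
 * are presumably fetched on demand by the hardware table walker that the
 * architecture backend sets up.
 */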
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	pm_runtime_put_sync(obj->dev);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	pm_runtime_get_sync(obj->dev);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	pm_runtime_put_sync(obj->dev);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 * @bytes:	maximum bytes available in @buf
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);

int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif	/* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr	p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}
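/*
 * Background for the two helpers above: "mcr p15, 0, <Rt>, c7, c10, 1" is
 * the ARM D-cache clean-by-MVA operation, so each loop iteration pushes one
 * L1 cache line worth of page-table words out to memory where the IOMMU's
 * table walker can see them. The FIXME applies because any outer (L2) cache
 * is not cleaned here.
 */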
static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte)
		kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already existed */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}
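/*
 * Background for the 16-entry loops above: in the ARM short-descriptor
 * format that this page table mirrors, a 16MB "supersection" is encoded as
 * 16 identical consecutive first-level entries and a 64KB "large page" as 16
 * identical consecutive second-level entries, which is why
 * iopgd_alloc_super() and iopte_alloc_large() replicate the descriptor 16
 * times and then flush the whole range.
 */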
static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:	an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
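/*
 * Illustrative use only (the addresses below are made up): mapping a single
 * 4KB page at device address 0x20000000 onto physical address 0x80000000
 * amounts to
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, 0x20000000, 0x80000000, MMU_CAM_PGSZ_4K);
 *	omap_iopgtable_store_entry(obj, &e);
 *
 * which is the sequence omap_iommu_map() further below performs for each
 * page handed down by the IOMMU core.
 */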
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}
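/*
 * Note on the fault path above: report_iommu_fault() returns 0 only when a
 * handler installed via iommu_set_fault_handler() claims the fault (for
 * instance by dynamically loading a missing mapping), in which case the
 * interrupt is considered handled; otherwise this MMU is disabled and the
 * offending PGD/PTE is dumped before returning IRQ_NONE.
 */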
static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;
	struct device_node *of = pdev->dev.of_node;

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (of) {
		obj->name = dev_name(&pdev->dev);
		obj->nr_tlb_entries = 32;
		err = of_property_read_u32(of, "ti,#tlb-entries",
					   &obj->nr_tlb_entries);
		if (err && err != -EINVAL)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
	} else {
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
	}

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	iopgtable_clear_entry_all(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{},
};
MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
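/*
 * Illustrative devicetree usage (register address, interrupt number and node
 * name are made up here, not taken from a real board file):
 *
 *	mmu_isp: mmu@480bd400 {
 *		compatible = "ti,omap4-iommu";
 *		reg = <0x480bd400 0x100>;
 *		interrupts = <24>;
 *		ti,#tlb-entries = <32>;
 *	};
 *
 * Client devices then point at the MMU node through an "iommus" phandle,
 * which is what omap_iommu_add_device() below looks up.
 */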
static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	/* FIXME: add OMAP1 support */
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}
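/*
 * How the two callbacks above get their sizes (behaviour of the generic
 * IOMMU core, summarised here for context): since pgsize_bitmap is set to
 * OMAP_IOMMU_PGSIZES, iommu_map()/iommu_unmap() split every request into
 * 4KB, 64KB, 1MB and 16MB chunks before calling in, so each call here sees
 * exactly one supported page size; an aligned 2MB request, for example,
 * arrives as two 1MB sections.
 */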
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	if (!arch_data || !arch_data->name) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end	= (1ULL << 32) - 1;
	domain->geometry.force_aperture	= true;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * An iommu device is still attached
	 * (currently, only one device can be attached) ?
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}
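/*
 * Worked example for the lookup above (assuming omap_iommu_translate() in
 * omap-iopgtable.h keeps the descriptor bits selected by the mask and merges
 * in the low bits of the address): for a 1MB section descriptor with
 * physical base 0x80000000 and da = 0x20123456, the result is
 * 0x80000000 | (0x20123456 & ~IOSECTION_MASK) = 0x80123456.
 */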
static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct device_node *np;
	struct platform_device *pdev;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
	dev->archdata.iommu = arch_data;

	of_node_put(np);

	return 0;
}

static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data->name);
	kfree(arch_data);
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");