
Searched refs:pfn (Results 1 - 25 of 89) sorted by relevance


/drivers/xen/
tmem.c
132 u32 index, unsigned long pfn)
134 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
141 u32 index, unsigned long pfn)
143 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
176 unsigned long pfn = page_to_pfn(page); local
183 (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
191 unsigned long pfn = page_to_pfn(page); local
199 ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
131 xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) argument
140 xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, u32 index, unsigned long pfn) argument
290 unsigned long pfn = page_to_pfn(page); local
316 unsigned long pfn = page_to_pfn(page); local
[all...]
balloon.c
307 unsigned long pfn, i; local
347 pfn = page_to_pfn(page);
351 set_phys_to_machine(pfn, frame_list[i]);
357 (unsigned long)__va(pfn << PAGE_SHIFT),
377 unsigned long pfn, i; local
421 pfn = frame_list[i];
422 frame_list[i] = pfn_to_mfn(pfn);
423 page = pfn_to_page(pfn);
436 (unsigned long)__va(pfn << PAGE_SHIFT),
443 __set_phys_to_machine(pfn, INVALID_P2M_ENTR
589 unsigned long pfn, extra_pfn_end; local
[all...]
swiotlb-xen.c
95 unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr)); local
96 dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
111 static int check_pages_physically_contiguous(unsigned long pfn, argument
119 next_mfn = pfn_to_mfn(pfn);
123 if (pfn_to_mfn(++pfn) != ++next_mfn)
131 unsigned long pfn = PFN_DOWN(p); local
136 if (check_pages_physically_contiguous(pfn, offset, size))
144 unsigned long pfn = mfn_to_local_pfn(mfn); local
151 if (pfn_valid(pfn)) {
152 paddr = PFN_PHYS(pfn);
[all...]
grant-table.c
323 pr_debug("freeing g.e. %#x (pfn %#lx)\n",
371 printk("%s g.e. %#x (pfn %#lx)\n",
388 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) argument
395 gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
402 unsigned long pfn)
404 gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
631 xen_pfn_t *pfn; local
645 pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
646 if (!pfn) {
401 gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, unsigned long pfn) argument
[all...]
/drivers/infiniband/hw/mlx5/
mem.c
55 u64 pfn; local
68 pfn = sg_dma_address(sg) >> page_shift;
71 tmp = (unsigned long)pfn;
75 base = pfn;
78 if (base + p != pfn) {
83 base = pfn;
/drivers/media/v4l2-core/
videobuf2-dma-contig.c
432 unsigned long pfn, start_pfn, prev_pfn; local
439 ret = follow_pfn(vma, start, &pfn);
443 start_pfn = pfn;
447 prev_pfn = pfn;
448 ret = follow_pfn(vma, start, &pfn);
454 if (pfn != prev_pfn + 1)
469 unsigned long pfn; local
470 int ret = follow_pfn(vma, start, &pfn);
472 if (!pfn_valid(pfn))
479 pages[i] = pfn_to_page(pfn);
531 vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) argument
536 vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) argument
541 vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) argument
546 vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) argument
623 unsigned long pfn; local
[all...]
/drivers/base/
node.c
370 static int get_nid_for_pfn(unsigned long pfn) argument
374 if (!pfn_valid_within(pfn))
376 page = pfn_to_page(pfn);
379 return pfn_to_nid(pfn);
386 unsigned long pfn, sect_start_pfn, sect_end_pfn; local
396 for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
399 page_nid = get_nid_for_pfn(pfn);
423 unsigned long pfn, sect_start_pf local
458 unsigned long pfn; local
[all...]
memory.c
127 unsigned long i, pfn; local
134 pfn = section_nr_to_pfn(mem->start_section_nr + i);
135 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
192 unsigned long pfn = start_pfn; local
199 for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
200 if (WARN_ON_ONCE(!pfn_valid(pfn)))
202 page = pfn_to_page(pfn);
210 pfn_to_section_nr(pfn), j);
485 u64 pfn; local
488 if (kstrtoull(buf, 0, &pfn) <
504 u64 pfn; local
[all...]
dma-mapping.c
253 unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); local
263 pfn + off,
310 unsigned long pfn; local
316 for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
317 pages[i] = pfn_to_page(pfn + i);
/drivers/vfio/
vfio_iommu_type1.c
188 * pfn conversion and shouldn't be tracked as locked pages.
190 static bool is_invalid_reserved_pfn(unsigned long pfn) argument
192 if (pfn_valid(pfn)) {
194 struct page *tail = pfn_to_page(pfn);
218 static int put_pfn(unsigned long pfn, int prot) argument
220 if (!is_invalid_reserved_pfn(pfn)) {
221 struct page *page = pfn_to_page(pfn);
230 static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn) argument
237 *pfn = page_to_pfn(page[0]);
246 *pfn
292 unsigned long pfn = 0; local
316 vfio_unpin_pages(unsigned long pfn, long npage, int prot, bool do_accounting) argument
480 map_try_harder(struct vfio_domain *domain, dma_addr_t iova, unsigned long pfn, long npage, int prot) argument
500 vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, unsigned long pfn, long npage, int prot) argument
535 unsigned long pfn; local
[all...]
/drivers/gpu/drm/gma500/
gem.c
179 unsigned long pfn; local
212 pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
214 pfn = page_to_pfn(r->pages[page_offset]);
215 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
mmu.h
82 unsigned long *pfn);
/drivers/infiniband/hw/mthca/
mthca_uar.c
44 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
/drivers/video/adf/
adf_memblock.c
28 unsigned long pfn = PFN_DOWN(pdata->base); local
29 struct page *page = pfn_to_page(pfn);
79 unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset; local
80 struct page *page = pfn_to_page(pfn);
/drivers/misc/
vmw_balloon.c
318 static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn, argument
324 pfn32 = (u32)pfn;
325 if (pfn32 != pfn)
330 *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
334 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
343 static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn) argument
348 pfn32 = (u32)pfn;
349 if (pfn32 != pfn)
354 status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy);
358 pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, statu
[all...]
/drivers/char/
mem.c
26 #include <linux/pfn.h>
55 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) argument
63 static inline int range_is_allowed(unsigned long pfn, unsigned long size) argument
65 u64 from = ((u64)pfn) << PAGE_SHIFT;
70 if (!devmem_is_allowed(pfn)) {
77 pfn++;
82 static inline int range_is_allowed(unsigned long pfn, unsigned long size) argument
227 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
268 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, argument
272 phys_addr_t offset = pfn << PAGE_SHIF
226 phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) argument
351 unsigned long pfn; local
[all...]
/drivers/gpu/drm/ttm/
ttm_bo_vm.c
93 unsigned long pfn; local
216 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
228 pfn = page_to_pfn(page);
232 ret = vm_insert_mixed(&cvma, address, pfn);
234 ret = vm_insert_pfn(&cvma, address, pfn);
/drivers/edac/
cell_edac.c
38 unsigned long address, pfn, offset, syndrome; local
47 pfn = address >> PAGE_SHIFT;
53 csrow->first_page + pfn, offset, syndrome,
61 unsigned long address, pfn, offset; local
70 pfn = address >> PAGE_SHIFT;
75 csrow->first_page + pfn, offset, 0,
i3000_edac.c
50 * unsigned long pfn and offset from hardware regs which are u8/u32.
237 unsigned long pfn, offset; local
254 pfn = deap_pfn(info->edeap, info->deap);
258 row = edac_mc_find_csrow_by_page(mci, pfn);
262 pfn, offset, 0,
267 pfn, offset, info->derrsyn,
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c
628 static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add) argument
641 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
671 int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages) argument
676 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
681 int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages) argument
686 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
691 static int ehea_is_hugepage(unsigned long pfn) argument
695 if (pfn & EHEA_HUGEPAGE_PFN_MASK)
698 page_order = compound_order(pfn_to_page(pfn));
709 unsigned long pfn, start_pf local
[all...]
/drivers/scsi/csiostor/
csio_mb.c
104 * @mpfn: Master pfn
281 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
448 FW_IQ_CMD_PFN(iq_params->pfn) |
504 FW_IQ_CMD_PFN(iq_params->pfn) |
627 FW_IQ_CMD_PFN(iq_params->pfn) |
662 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
709 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
812 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
1155 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1156 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DAT
[all...]
/drivers/iommu/
iova.c
70 /* only cache if it's below 32bit pfn */
240 * find_iova - finds an iova for a given pfn
242 * @pfn: - page frame number
244 * given domain which matches the given pfn.
246 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) argument
257 /* If pfn falls within iova's range, return iova */
258 if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
269 if (pfn < iova->pfn_lo)
271 else if (pfn > iov
305 free_iova(struct iova_domain *iovad, unsigned long pfn) argument
[all...]
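
The find_iova() kerneldoc quoted above describes the lookup contract, so a brief illustration follows: a minimal sketch of how a caller could use the two iova.c helpers whose signatures appear in these matches, find_iova() and free_iova(). It is not code from any file listed here, and the wrapper names pfn_is_mapped() and release_pfn_range() are hypothetical.

    /*
     * Illustrative only: exercise the iova.c lookup/free helpers shown in
     * the matches above.  Wrapper names are hypothetical.
     */
    #include <linux/iova.h>

    /* True if @pfn falls inside a currently allocated range of @iovad. */
    static bool pfn_is_mapped(struct iova_domain *iovad, unsigned long pfn)
    {
            return find_iova(iovad, pfn) != NULL;
    }

    /* Release whichever range covers @pfn; free_iova() does nothing if no range does. */
    static void release_pfn_range(struct iova_domain *iovad, unsigned long pfn)
    {
            free_iova(iovad, pfn);
    }
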
tegra-smmu.c
226 #define SMMU_PFN_TO_PTE(pfn, attr) (unsigned long)((pfn) | (attr))
600 dma_addr_t iova, unsigned long pfn)
605 page = pfn_to_page(pfn);
611 vaddr[1] = pfn << PAGE_SHIFT;
616 unsigned long addr, unsigned long pfn)
707 unsigned long pfn)
720 *pte = SMMU_PFN_TO_PTE(pfn, as->pte_attr);
725 put_signature(as, iova, pfn);
732 unsigned long pfn local
599 put_signature(struct smmu_as *as, dma_addr_t iova, unsigned long pfn) argument
615 put_signature(struct smmu_as *as, unsigned long addr, unsigned long pfn) argument
706 __smmu_iommu_map_pfn(struct smmu_as *as, dma_addr_t iova, unsigned long pfn) argument
767 unsigned long pfn; local
[all...]
/drivers/gpu/drm/exynos/
exynos_drm_gem.c
87 unsigned long pfn; local
105 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
107 return vm_insert_mixed(vma, f_vaddr, pfn);
430 unsigned long pfn; local
431 int ret = follow_pfn(vma, start, &pfn);
435 pages[i] = pfn_to_page(pfn);
/drivers/misc/sgi-gru/
gruhandles.c
175 tfh->pfn = paddr >> GRU_PADDR_SHIFT;
191 tfh->pfn = paddr >> GRU_PADDR_SHIFT;

Completed in 341 milliseconds
