/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;

static union {
	struct grant_entry_v1 *v1;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Map a list of frames for storing grant entries.  The frames
	 * parameter receives the grant table addresses while the grant table
	 * is being set up, and nr_gframes is the number of frames to map.
	 * Returns GNTST_okay on success; a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames that was mapped by map_frames.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for access or transfer.  Ref is the
	 * reference of the introduced grant entry, domid is the id of the
	 * granted domain, frame is the page frame to be granted, and flags is
	 * the status to which the grant entry should be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access.  Ref is the
	 * reference of the grant entry whose access will be stopped;
	 * readonly is unused by this function.  If the grant entry is
	 * currently mapped for reading or writing, return failure (== 0)
	 * without tearing down the grant access.  Otherwise, stop grant
	 * access for this entry and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer.  Ref is the
	 * reference of the grant entry whose transfer will be stopped.  If
	 * the transfer has not started, just reclaim the grant entry and
	 * return failure (== 0).  Otherwise, wait for the transfer to
	 * complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry.  Ref is the reference of the
	 * queried grant entry; the return value is the status of that entry.
	 * The detailed status (reading/writing) can be extracted from the
	 * return value with bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

static struct gnttab_ops *gnttab_interface;

static int grant_table_version;
static int grefs_per_grant_frame;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
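
/*
 * Worked example of the two-level lookup above (illustrative only, assuming
 * 4 KiB pages and a 32-bit grant_ref_t): RPP is 4096 / 4 = 1024, so grant
 * reference 1500 lives at gnttab_list[1][476].
 */
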
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}
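
/*
 * Illustrative effect of the ordering above (not an additional code path):
 * for a read-only grant, gnttab_grant_foreign_access_ref() below ends up
 * storing domid and frame first and only then, after the wmb(), setting
 * flags = GTF_permit_access | GTF_readonly, so the granted domain never
 * sees a valid flags field paired with stale domid/frame values.
 */
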
/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
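
/*
 * Minimal usage sketch (hypothetical caller with made-up variable names,
 * not taken from an in-tree driver):
 *
 *	int ref = gnttab_grant_foreign_access(otherend_id, frame, 0);
 *	if (ref < 0)
 *		return ref;
 *	... advertise ref to the peer, e.g. via xenstore ...
 *	gnttab_end_foreign_access(ref, 0, 0UL);
 *
 * Passing 0 as the page argument of gnttab_end_foreign_access() (defined
 * further below) means no page is freed when the grant is finally torn down.
 */
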
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);

static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
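
/*
 * Note on the deferred path above: entries queued by gnttab_add_deferred()
 * are retried from the timer roughly once per second (jiffies + HZ), and
 * with warn_delay starting at 60 the "still pending" message is emitted
 * after about a minute of unsuccessful attempts to revoke the grant.
 */
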
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer has not even started yet, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
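
/*
 * Illustrative use of the bulk reference helpers above (hypothetical,
 * made-up names; not from an in-tree caller):
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0) {
 *		... use ref, e.g. with gnttab_grant_foreign_access_ref(),
 *		    and revoke it again before giving it back ...
 *		gnttab_release_grant_reference(&head, ref);
 *	}
 *	gnttab_free_grant_references(head);
 *
 * References handed back with gnttab_release_grant_reference() rejoin the
 * private list and are returned to the global free list by the final
 * gnttab_free_grant_references() call.
 */
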
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	BUG_ON(grefs_per_grant_frame == 0);

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_grant_frame;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}


	for (i = grefs_per_grant_frame * nr_grant_frames;
	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}
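
/*
 * The retry loop above sleeps 1 ms, then 2 ms, and so on, so a target that
 * keeps returning GNTST_eagain (i.e. stays paged out) is retried for roughly
 * 1 + 2 + ... + 255 ms, a little over 30 seconds, before the status is
 * forced to GNTST_bad_page.
 */
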
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
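
/*
 * Illustrative single-element gnttab_batch_copy() call (hypothetical ref and
 * frame values; field names as in the grant-table interface headers):
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = remote_ref,
 *		.source.domid = otherend_id,
 *		.dest.u.gmfn  = local_gfn,
 *		.dest.domid   = DOMID_SELF,
 *		.len          = PAGE_SIZE,
 *		.flags        = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		... handle the failure ...
 *
 * GNTST_eagain is already retried internally, so callers only see the final
 * status.
 */
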
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	/* Retry eagain maps */
	for (i = 0; i < count; i++)
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count)
{
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/*
	 * No need for kzalloc as the array is fully initialized by the
	 * GNTTABOP_setup_table hypercall below.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static struct gnttab_ops gnttab_v1_ops = {
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access = gnttab_query_foreign_access_v1,
};

static void gnttab_request_version(void)
{
	/* Only version 1 is used, which will always be available. */
	grant_table_version = 1;
	grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
	gnttab_interface = &gnttab_v1_ops;

	pr_info("Grant tables using version %d layout\n", grant_table_version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(grefs_per_grant_frame == 0);
	cur = nr_grant_frames;
	extra = ((req_entries + (grefs_per_grant_frame-1)) /
		 grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}
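
/*
 * Worked example for gnttab_expand() above, assuming 4 KiB pages: the v1
 * layout gives grefs_per_grant_frame = 4096 / 8 = 512, so a request for
 * 1000 additional entries maps DIV_ROUND_UP(1000, 512) = 2 extra grant
 * frames before the free list is grown.
 */
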
int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(grefs_per_grant_frame == 0);
	max_nr_glist_frames = (max_nr_grant_frames *
			       grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames);
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);