1/* 2 * Linux MegaRAID driver for SAS based RAID controllers 3 * 4 * Copyright (c) 2009-2012 LSI Corporation. 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 2 9 * of the License, or (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * 20 * FILE: megaraid_sas_fusion.c 21 * 22 * Authors: LSI Corporation 23 * Sumant Patro 24 * Adam Radford <linuxraid@lsi.com> 25 * 26 * Send feedback to: <megaraidlinux@lsi.com> 27 * 28 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035 29 * ATTN: Linuxraid 30 */ 31 32#include <linux/kernel.h> 33#include <linux/types.h> 34#include <linux/pci.h> 35#include <linux/list.h> 36#include <linux/moduleparam.h> 37#include <linux/module.h> 38#include <linux/spinlock.h> 39#include <linux/interrupt.h> 40#include <linux/delay.h> 41#include <linux/uio.h> 42#include <linux/uaccess.h> 43#include <linux/fs.h> 44#include <linux/compat.h> 45#include <linux/blkdev.h> 46#include <linux/mutex.h> 47#include <linux/poll.h> 48 49#include <scsi/scsi.h> 50#include <scsi/scsi_cmnd.h> 51#include <scsi/scsi_device.h> 52#include <scsi/scsi_host.h> 53#include <scsi/scsi_dbg.h> 54 55#include "megaraid_sas_fusion.h" 56#include "megaraid_sas.h" 57 58extern void megasas_free_cmds(struct megasas_instance *instance); 59extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance 60 *instance); 61extern void 62megasas_complete_cmd(struct megasas_instance *instance, 63 struct megasas_cmd *cmd, u8 alt_status); 64int megasas_is_ldio(struct scsi_cmnd *cmd); 65int 66wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 67 int seconds); 68 69void 70megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); 71int megasas_alloc_cmds(struct megasas_instance *instance); 72int 73megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); 74int 75megasas_issue_polled(struct megasas_instance *instance, 76 struct megasas_cmd *cmd); 77void 78megasas_check_and_restore_queue_depth(struct megasas_instance *instance); 79 80int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 81void megaraid_sas_kill_hba(struct megasas_instance *instance); 82 83extern u32 megasas_dbg_lvl; 84void megasas_sriov_heartbeat_handler(unsigned long instance_addr); 85int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 86 int initial); 87void megasas_start_timer(struct megasas_instance *instance, 88 struct timer_list *timer, 89 void *fn, unsigned long interval); 90extern struct megasas_mgmt_info megasas_mgmt_info; 91extern int resetwaittime; 92 93 94 95/** 96 * megasas_enable_intr_fusion - Enables interrupts 97 * @regs: MFI register set 98 */ 99void 100megasas_enable_intr_fusion(struct megasas_instance *instance) 101{ 102 struct megasas_register_set __iomem *regs; 103 regs = instance->reg_set; 104 /* For Thunderbolt/Invader also clear intr on enable */ 105 writel(~0, ®s->outbound_intr_status); 106 readl(®s->outbound_intr_status); 
107 108 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 109 110 /* Dummy readl to force pci flush */ 111 readl(®s->outbound_intr_mask); 112 instance->mask_interrupts = 0; 113} 114 115/** 116 * megasas_disable_intr_fusion - Disables interrupt 117 * @regs: MFI register set 118 */ 119void 120megasas_disable_intr_fusion(struct megasas_instance *instance) 121{ 122 u32 mask = 0xFFFFFFFF; 123 u32 status; 124 struct megasas_register_set __iomem *regs; 125 regs = instance->reg_set; 126 instance->mask_interrupts = 1; 127 128 writel(mask, ®s->outbound_intr_mask); 129 /* Dummy readl to force pci flush */ 130 status = readl(®s->outbound_intr_mask); 131} 132 133int 134megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) 135{ 136 u32 status; 137 /* 138 * Check if it is our interrupt 139 */ 140 status = readl(®s->outbound_intr_status); 141 142 if (status & 1) { 143 writel(status, ®s->outbound_intr_status); 144 readl(®s->outbound_intr_status); 145 return 1; 146 } 147 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 148 return 0; 149 150 return 1; 151} 152 153/** 154 * megasas_get_cmd_fusion - Get a command from the free pool 155 * @instance: Adapter soft state 156 * 157 * Returns a free command from the pool 158 */ 159struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance 160 *instance) 161{ 162 unsigned long flags; 163 struct fusion_context *fusion = 164 (struct fusion_context *)instance->ctrl_context; 165 struct megasas_cmd_fusion *cmd = NULL; 166 167 spin_lock_irqsave(&fusion->mpt_pool_lock, flags); 168 169 if (!list_empty(&fusion->cmd_pool)) { 170 cmd = list_entry((&fusion->cmd_pool)->next, 171 struct megasas_cmd_fusion, list); 172 list_del_init(&cmd->list); 173 } else { 174 printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); 175 } 176 177 spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); 178 return cmd; 179} 180 181/** 182 * megasas_return_cmd_fusion - Return a cmd to free command pool 183 * @instance: Adapter soft state 184 * @cmd: Command packet to be returned to free command pool 185 */ 186inline void megasas_return_cmd_fusion(struct megasas_instance *instance, 187 struct megasas_cmd_fusion *cmd) 188{ 189 unsigned long flags; 190 struct fusion_context *fusion = 191 (struct fusion_context *)instance->ctrl_context; 192 193 spin_lock_irqsave(&fusion->mpt_pool_lock, flags); 194 195 cmd->scmd = NULL; 196 cmd->sync_cmd_idx = (u32)ULONG_MAX; 197 list_add(&cmd->list, (&fusion->cmd_pool)->next); 198 199 spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags); 200} 201 202/** 203 * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool 204 * @instance: Adapter soft state 205 * @cmd_mfi: MFI Command packet to be returned to free command pool 206 * @cmd_mpt: MPT Command packet to be returned to free command pool 207 */ 208inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance, 209 struct megasas_cmd *cmd_mfi, 210 struct megasas_cmd_fusion *cmd_fusion) 211{ 212 unsigned long flags; 213 214 /* 215 * TO DO: optimize this code and use only one lock instead of two 216 * locks being used currently- mpt_pool_lock is acquired 217 * inside mfi_pool_lock 218 */ 219 spin_lock_irqsave(&instance->mfi_pool_lock, flags); 220 megasas_return_cmd_fusion(instance, cmd_fusion); 221 if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED) 222 dev_err(&instance->pdev->dev, "Possible bug from %s %d\n", 223 __func__, __LINE__); 224 atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED); 225 __megasas_return_cmd(instance, 
cmd_mfi); 226 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 227} 228 229/** 230 * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool 231 * @instance: Adapter soft state 232 */ 233static void megasas_teardown_frame_pool_fusion( 234 struct megasas_instance *instance) 235{ 236 int i; 237 struct fusion_context *fusion = instance->ctrl_context; 238 239 u16 max_cmd = instance->max_fw_cmds; 240 241 struct megasas_cmd_fusion *cmd; 242 243 if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) { 244 printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, " 245 "sense pool : %p\n", fusion->sg_dma_pool, 246 fusion->sense_dma_pool); 247 return; 248 } 249 250 /* 251 * Return all frames to pool 252 */ 253 for (i = 0; i < max_cmd; i++) { 254 255 cmd = fusion->cmd_list[i]; 256 257 if (cmd->sg_frame) 258 pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame, 259 cmd->sg_frame_phys_addr); 260 261 if (cmd->sense) 262 pci_pool_free(fusion->sense_dma_pool, cmd->sense, 263 cmd->sense_phys_addr); 264 } 265 266 /* 267 * Now destroy the pool itself 268 */ 269 pci_pool_destroy(fusion->sg_dma_pool); 270 pci_pool_destroy(fusion->sense_dma_pool); 271 272 fusion->sg_dma_pool = NULL; 273 fusion->sense_dma_pool = NULL; 274} 275 276/** 277 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool 278 * @instance: Adapter soft state 279 */ 280void 281megasas_free_cmds_fusion(struct megasas_instance *instance) 282{ 283 int i; 284 struct fusion_context *fusion = instance->ctrl_context; 285 286 u32 max_cmds, req_sz, reply_sz, io_frames_sz; 287 288 289 req_sz = fusion->request_alloc_sz; 290 reply_sz = fusion->reply_alloc_sz; 291 io_frames_sz = fusion->io_frames_alloc_sz; 292 293 max_cmds = instance->max_fw_cmds; 294 295 /* Free descriptors and request Frames memory */ 296 if (fusion->req_frames_desc) 297 dma_free_coherent(&instance->pdev->dev, req_sz, 298 fusion->req_frames_desc, 299 fusion->req_frames_desc_phys); 300 301 if (fusion->reply_frames_desc) { 302 pci_pool_free(fusion->reply_frames_desc_pool, 303 fusion->reply_frames_desc, 304 fusion->reply_frames_desc_phys); 305 pci_pool_destroy(fusion->reply_frames_desc_pool); 306 } 307 308 if (fusion->io_request_frames) { 309 pci_pool_free(fusion->io_request_frames_pool, 310 fusion->io_request_frames, 311 fusion->io_request_frames_phys); 312 pci_pool_destroy(fusion->io_request_frames_pool); 313 } 314 315 /* Free the Fusion frame pool */ 316 megasas_teardown_frame_pool_fusion(instance); 317 318 /* Free all the commands in the cmd_list */ 319 for (i = 0; i < max_cmds; i++) 320 kfree(fusion->cmd_list[i]); 321 322 /* Free the cmd_list buffer itself */ 323 kfree(fusion->cmd_list); 324 fusion->cmd_list = NULL; 325 326 INIT_LIST_HEAD(&fusion->cmd_pool); 327} 328 329/** 330 * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames 331 * @instance: Adapter soft state 332 * 333 */ 334static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) 335{ 336 int i; 337 u32 max_cmd; 338 struct fusion_context *fusion; 339 struct megasas_cmd_fusion *cmd; 340 u32 total_sz_chain_frame; 341 342 fusion = instance->ctrl_context; 343 max_cmd = instance->max_fw_cmds; 344 345 total_sz_chain_frame = MEGASAS_MAX_SZ_CHAIN_FRAME; 346 347 /* 348 * Use DMA pool facility provided by PCI layer 349 */ 350 351 fusion->sg_dma_pool = pci_pool_create("megasas sg pool fusion", 352 instance->pdev, 353 total_sz_chain_frame, 4, 354 0); 355 if (!fusion->sg_dma_pool) { 356 printk(KERN_DEBUG "megasas: failed to setup request pool " 357 "fusion\n"); 358 return 
-ENOMEM; 359 } 360 fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion", 361 instance->pdev, 362 SCSI_SENSE_BUFFERSIZE, 64, 0); 363 364 if (!fusion->sense_dma_pool) { 365 printk(KERN_DEBUG "megasas: failed to setup sense pool " 366 "fusion\n"); 367 pci_pool_destroy(fusion->sg_dma_pool); 368 fusion->sg_dma_pool = NULL; 369 return -ENOMEM; 370 } 371 372 /* 373 * Allocate and attach a frame to each of the commands in cmd_list 374 */ 375 for (i = 0; i < max_cmd; i++) { 376 377 cmd = fusion->cmd_list[i]; 378 379 cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool, 380 GFP_KERNEL, 381 &cmd->sg_frame_phys_addr); 382 383 cmd->sense = pci_pool_alloc(fusion->sense_dma_pool, 384 GFP_KERNEL, &cmd->sense_phys_addr); 385 /* 386 * megasas_teardown_frame_pool_fusion() takes care of freeing 387 * whatever has been allocated 388 */ 389 if (!cmd->sg_frame || !cmd->sense) { 390 printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n"); 391 megasas_teardown_frame_pool_fusion(instance); 392 return -ENOMEM; 393 } 394 } 395 return 0; 396} 397 398/** 399 * megasas_alloc_cmds_fusion - Allocates the command packets 400 * @instance: Adapter soft state 401 * 402 * 403 * Each frame has a 32-bit field called context. This context is used to get 404 * back the megasas_cmd_fusion from the frame when a frame gets completed 405 * In this driver, the 32 bit values are the indices into an array cmd_list. 406 * This array is used only to look up the megasas_cmd_fusion given the context. 407 * The free commands themselves are maintained in a linked list called cmd_pool. 408 * 409 * cmds are formed in the io_request and sg_frame members of the 410 * megasas_cmd_fusion. The context field is used to get a request descriptor 411 * and is used as SMID of the cmd. 412 * SMID value range is from 1 to max_fw_cmds. 413 */ 414int 415megasas_alloc_cmds_fusion(struct megasas_instance *instance) 416{ 417 int i, j, count; 418 u32 max_cmd, io_frames_sz; 419 struct fusion_context *fusion; 420 struct megasas_cmd_fusion *cmd; 421 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 422 u32 offset; 423 dma_addr_t io_req_base_phys; 424 u8 *io_req_base; 425 426 fusion = instance->ctrl_context; 427 428 max_cmd = instance->max_fw_cmds; 429 430 fusion->req_frames_desc = 431 dma_alloc_coherent(&instance->pdev->dev, 432 fusion->request_alloc_sz, 433 &fusion->req_frames_desc_phys, GFP_KERNEL); 434 435 if (!fusion->req_frames_desc) { 436 printk(KERN_ERR "megasas; Could not allocate memory for " 437 "request_frames\n"); 438 goto fail_req_desc; 439 } 440 441 count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; 442 fusion->reply_frames_desc_pool = 443 pci_pool_create("reply_frames pool", instance->pdev, 444 fusion->reply_alloc_sz * count, 16, 0); 445 446 if (!fusion->reply_frames_desc_pool) { 447 printk(KERN_ERR "megasas; Could not allocate memory for " 448 "reply_frame pool\n"); 449 goto fail_reply_desc; 450 } 451 452 fusion->reply_frames_desc = 453 pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, 454 &fusion->reply_frames_desc_phys); 455 if (!fusion->reply_frames_desc) { 456 printk(KERN_ERR "megasas; Could not allocate memory for " 457 "reply_frame pool\n"); 458 pci_pool_destroy(fusion->reply_frames_desc_pool); 459 goto fail_reply_desc; 460 } 461 462 reply_desc = fusion->reply_frames_desc; 463 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) 464 reply_desc->Words = ULLONG_MAX; 465 466 io_frames_sz = fusion->io_frames_alloc_sz; 467 468 fusion->io_request_frames_pool = 469 pci_pool_create("io_request_frames pool", instance->pdev, 470 fusion->io_frames_alloc_sz, 16, 0); 471 472 if (!fusion->io_request_frames_pool) { 473 printk(KERN_ERR "megasas: Could not allocate memory for " 474 "io_request_frame pool\n"); 475 goto fail_io_frames; 476 } 477 478 fusion->io_request_frames = 479 pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, 480 &fusion->io_request_frames_phys); 481 if (!fusion->io_request_frames) { 482 printk(KERN_ERR "megasas: Could not allocate memory for " 483 "io_request_frames frames\n"); 484 pci_pool_destroy(fusion->io_request_frames_pool); 485 goto fail_io_frames; 486 } 487 488 /* 489 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers. 490 * Allocate the dynamic array first and then allocate individual 491 * commands. 492 */ 493 fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *) 494 * max_cmd, GFP_KERNEL); 495 496 if (!fusion->cmd_list) { 497 printk(KERN_DEBUG "megasas: out of memory. Could not alloc " 498 "memory for cmd_list_fusion\n"); 499 goto fail_cmd_list; 500 } 501 502 max_cmd = instance->max_fw_cmds; 503 for (i = 0; i < max_cmd; i++) { 504 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), 505 GFP_KERNEL); 506 if (!fusion->cmd_list[i]) { 507 printk(KERN_ERR "Could not alloc cmd list fusion\n"); 508 509 for (j = 0; j < i; j++) 510 kfree(fusion->cmd_list[j]); 511 512 kfree(fusion->cmd_list); 513 fusion->cmd_list = NULL; 514 goto fail_cmd_list; 515 } 516 } 517 518 /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */ 519 io_req_base = fusion->io_request_frames + 520 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 521 io_req_base_phys = fusion->io_request_frames_phys + 522 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; 523 524 /* 525 * Add all the commands to command pool (fusion->cmd_pool) 526 */ 527 528 /* SMID 0 is reserved. 
Set SMID/index from 1 */ 529 for (i = 0; i < max_cmd; i++) { 530 cmd = fusion->cmd_list[i]; 531 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; 532 memset(cmd, 0, sizeof(struct megasas_cmd_fusion)); 533 cmd->index = i + 1; 534 cmd->scmd = NULL; 535 cmd->sync_cmd_idx = (u32)ULONG_MAX; /* Set to Invalid */ 536 cmd->instance = instance; 537 cmd->io_request = 538 (struct MPI2_RAID_SCSI_IO_REQUEST *) 539 (io_req_base + offset); 540 memset(cmd->io_request, 0, 541 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)); 542 cmd->io_request_phys_addr = io_req_base_phys + offset; 543 544 list_add_tail(&cmd->list, &fusion->cmd_pool); 545 } 546 547 /* 548 * Create a frame pool and assign one frame to each cmd 549 */ 550 if (megasas_create_frame_pool_fusion(instance)) { 551 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); 552 megasas_free_cmds_fusion(instance); 553 goto fail_req_desc; 554 } 555 556 return 0; 557 558fail_cmd_list: 559 pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames, 560 fusion->io_request_frames_phys); 561 pci_pool_destroy(fusion->io_request_frames_pool); 562fail_io_frames: 563 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, 564 fusion->reply_frames_desc, 565 fusion->reply_frames_desc_phys); 566 pci_pool_free(fusion->reply_frames_desc_pool, 567 fusion->reply_frames_desc, 568 fusion->reply_frames_desc_phys); 569 pci_pool_destroy(fusion->reply_frames_desc_pool); 570 571fail_reply_desc: 572 dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz, 573 fusion->req_frames_desc, 574 fusion->req_frames_desc_phys); 575fail_req_desc: 576 return -ENOMEM; 577} 578 579/** 580 * wait_and_poll - Issues a polling command 581 * @instance: Adapter soft state 582 * @cmd: Command packet to be issued 583 * 584 * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
585 */ 586int 587wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 588 int seconds) 589{ 590 int i; 591 struct megasas_header *frame_hdr = &cmd->frame->hdr; 592 struct fusion_context *fusion; 593 594 u32 msecs = seconds * 1000; 595 596 fusion = instance->ctrl_context; 597 /* 598 * Wait for cmd_status to change 599 */ 600 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) { 601 rmb(); 602 msleep(20); 603 } 604 605 if (frame_hdr->cmd_status == 0xff) { 606 if (fusion) 607 megasas_return_mfi_mpt_pthr(instance, cmd, 608 cmd->mpt_pthr_cmd_blocked); 609 return -ETIME; 610 } 611 612 return 0; 613} 614 615/** 616 * megasas_ioc_init_fusion - Initializes the FW 617 * @instance: Adapter soft state 618 * 619 * Issues the IOC Init cmd 620 */ 621int 622megasas_ioc_init_fusion(struct megasas_instance *instance) 623{ 624 struct megasas_init_frame *init_frame; 625 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; 626 dma_addr_t ioc_init_handle; 627 struct megasas_cmd *cmd; 628 u8 ret; 629 struct fusion_context *fusion; 630 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc; 631 int i; 632 struct megasas_header *frame_hdr; 633 634 fusion = instance->ctrl_context; 635 636 cmd = megasas_get_cmd(instance); 637 638 if (!cmd) { 639 printk(KERN_ERR "Could not allocate cmd for INIT Frame\n"); 640 ret = 1; 641 goto fail_get_cmd; 642 } 643 644 IOCInitMessage = 645 dma_alloc_coherent(&instance->pdev->dev, 646 sizeof(struct MPI2_IOC_INIT_REQUEST), 647 &ioc_init_handle, GFP_KERNEL); 648 649 if (!IOCInitMessage) { 650 printk(KERN_ERR "Could not allocate memory for " 651 "IOCInitMessage\n"); 652 ret = 1; 653 goto fail_fw_init; 654 } 655 656 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST)); 657 658 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; 659 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 660 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); 661 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 662 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); 663 664 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); 665 IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); 666 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); 667 IOCInitMessage->HostMSIxVectors = instance->msix_vectors; 668 init_frame = (struct megasas_init_frame *)cmd->frame; 669 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 670 671 frame_hdr = &cmd->frame->hdr; 672 frame_hdr->cmd_status = 0xFF; 673 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); 674 675 init_frame->cmd = MFI_CMD_INIT; 676 init_frame->cmd_status = 0xFF; 677 678 /* driver support Extended MSIX */ 679 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 680 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 681 init_frame->driver_operations. 
682 mfi_capabilities.support_additional_msix = 1; 683 /* driver supports HA / Remote LUN over Fast Path interface */ 684 init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun 685 = 1; 686 init_frame->driver_operations.mfi_capabilities.support_max_255lds 687 = 1; 688 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb 689 = 1; 690 /* Convert capability to LE32 */ 691 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 692 693 init_frame->queue_info_new_phys_addr_hi = 694 cpu_to_le32(upper_32_bits(ioc_init_handle)); 695 init_frame->queue_info_new_phys_addr_lo = 696 cpu_to_le32(lower_32_bits(ioc_init_handle)); 697 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); 698 699 req_desc.Words = 0; 700 req_desc.MFAIo.RequestFlags = 701 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << 702 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 703 cpu_to_le32s((u32 *)&req_desc.MFAIo); 704 req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr); 705 706 /* 707 * disable the intr before firing the init frame 708 */ 709 instance->instancet->disable_intr(instance); 710 711 for (i = 0; i < (10 * 1000); i += 20) { 712 if (readl(&instance->reg_set->doorbell) & 1) 713 msleep(20); 714 else 715 break; 716 } 717 718 instance->instancet->fire_cmd(instance, req_desc.u.low, 719 req_desc.u.high, instance->reg_set); 720 721 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS); 722 723 frame_hdr = &cmd->frame->hdr; 724 if (frame_hdr->cmd_status != 0) { 725 ret = 1; 726 goto fail_fw_init; 727 } 728 printk(KERN_ERR "megasas:IOC Init cmd success\n"); 729 730 ret = 0; 731 732fail_fw_init: 733 megasas_return_cmd(instance, cmd); 734 if (IOCInitMessage) 735 dma_free_coherent(&instance->pdev->dev, 736 sizeof(struct MPI2_IOC_INIT_REQUEST), 737 IOCInitMessage, ioc_init_handle); 738fail_get_cmd: 739 return ret; 740} 741 742/* 743 * megasas_get_ld_map_info - Returns FW's ld_map structure 744 * @instance: Adapter soft state 745 * @pend: Pend the command or not 746 * Issues an internal command (DCMD) to get the FW's controller PD 747 * list structure. This information is mainly used to find out SYSTEM 748 * supported by the FW. 749 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO 750 * dcmd.mbox.b[0] - number of LDs being sync'd 751 * dcmd.mbox.b[1] - 0 - complete command immediately. 
752 * - 1 - pend till config change 753 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP 754 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and 755 * uses extended struct MR_FW_RAID_MAP_EXT 756 */ 757static int 758megasas_get_ld_map_info(struct megasas_instance *instance) 759{ 760 int ret = 0; 761 struct megasas_cmd *cmd; 762 struct megasas_dcmd_frame *dcmd; 763 void *ci; 764 dma_addr_t ci_h = 0; 765 u32 size_map_info; 766 struct fusion_context *fusion; 767 768 cmd = megasas_get_cmd(instance); 769 770 if (!cmd) { 771 printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n"); 772 return -ENOMEM; 773 } 774 775 fusion = instance->ctrl_context; 776 777 if (!fusion) { 778 megasas_return_cmd(instance, cmd); 779 return -ENXIO; 780 } 781 782 dcmd = &cmd->frame->dcmd; 783 784 size_map_info = fusion->current_map_sz; 785 786 ci = (void *) fusion->ld_map[(instance->map_id & 1)]; 787 ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; 788 789 if (!ci) { 790 printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n"); 791 megasas_return_cmd(instance, cmd); 792 return -ENOMEM; 793 } 794 795 memset(ci, 0, fusion->max_map_sz); 796 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 797#if VD_EXT_DEBUG 798 dev_dbg(&instance->pdev->dev, 799 "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n", 800 __func__, cpu_to_le32(size_map_info)); 801#endif 802 dcmd->cmd = MFI_CMD_DCMD; 803 dcmd->cmd_status = 0xFF; 804 dcmd->sge_count = 1; 805 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); 806 dcmd->timeout = 0; 807 dcmd->pad_0 = 0; 808 dcmd->data_xfer_len = cpu_to_le32(size_map_info); 809 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 810 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 811 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 812 813 if (instance->ctrl_context && !instance->mask_interrupts) 814 ret = megasas_issue_blocked_cmd(instance, cmd, 815 MEGASAS_BLOCKED_CMD_TIMEOUT); 816 else 817 ret = megasas_issue_polled(instance, cmd); 818 819 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked) 820 megasas_return_mfi_mpt_pthr(instance, cmd, 821 cmd->mpt_pthr_cmd_blocked); 822 else 823 megasas_return_cmd(instance, cmd); 824 825 return ret; 826} 827 828u8 829megasas_get_map_info(struct megasas_instance *instance) 830{ 831 struct fusion_context *fusion = instance->ctrl_context; 832 833 fusion->fast_path_io = 0; 834 if (!megasas_get_ld_map_info(instance)) { 835 if (MR_ValidateMapInfo(instance)) { 836 fusion->fast_path_io = 1; 837 return 0; 838 } 839 } 840 return 1; 841} 842 843/* 844 * megasas_sync_map_info - Returns FW's ld_map structure 845 * @instance: Adapter soft state 846 * 847 * Issues an internal command (DCMD) to get the FW's controller PD 848 * list structure. This information is mainly used to find out SYSTEM 849 * supported by the FW. 
850 */ 851int 852megasas_sync_map_info(struct megasas_instance *instance) 853{ 854 int ret = 0, i; 855 struct megasas_cmd *cmd; 856 struct megasas_dcmd_frame *dcmd; 857 u32 size_sync_info, num_lds; 858 struct fusion_context *fusion; 859 struct MR_LD_TARGET_SYNC *ci = NULL; 860 struct MR_DRV_RAID_MAP_ALL *map; 861 struct MR_LD_RAID *raid; 862 struct MR_LD_TARGET_SYNC *ld_sync; 863 dma_addr_t ci_h = 0; 864 u32 size_map_info; 865 866 cmd = megasas_get_cmd(instance); 867 868 if (!cmd) { 869 printk(KERN_DEBUG "megasas: Failed to get cmd for sync" 870 "info.\n"); 871 return -ENOMEM; 872 } 873 874 fusion = instance->ctrl_context; 875 876 if (!fusion) { 877 megasas_return_cmd(instance, cmd); 878 return 1; 879 } 880 881 map = fusion->ld_drv_map[instance->map_id & 1]; 882 883 num_lds = le32_to_cpu(map->raidMap.ldCount); 884 885 dcmd = &cmd->frame->dcmd; 886 887 size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds; 888 889 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 890 891 ci = (struct MR_LD_TARGET_SYNC *) 892 fusion->ld_map[(instance->map_id - 1) & 1]; 893 memset(ci, 0, fusion->max_map_sz); 894 895 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; 896 897 ld_sync = (struct MR_LD_TARGET_SYNC *)ci; 898 899 for (i = 0; i < num_lds; i++, ld_sync++) { 900 raid = MR_LdRaidGet(i, map); 901 ld_sync->targetId = MR_GetLDTgtId(i, map); 902 ld_sync->seqNum = raid->seqNum; 903 } 904 905 size_map_info = fusion->current_map_sz; 906 907 dcmd->cmd = MFI_CMD_DCMD; 908 dcmd->cmd_status = 0xFF; 909 dcmd->sge_count = 1; 910 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); 911 dcmd->timeout = 0; 912 dcmd->pad_0 = 0; 913 dcmd->data_xfer_len = cpu_to_le32(size_map_info); 914 dcmd->mbox.b[0] = num_lds; 915 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; 916 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); 917 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 918 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 919 920 instance->map_update_cmd = cmd; 921 922 instance->instancet->issue_dcmd(instance, cmd); 923 924 return ret; 925} 926 927/* 928 * meagasas_display_intel_branding - Display branding string 929 * @instance: per adapter object 930 * 931 * Return nothing. 
932 */ 933static void 934megasas_display_intel_branding(struct megasas_instance *instance) 935{ 936 if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) 937 return; 938 939 switch (instance->pdev->device) { 940 case PCI_DEVICE_ID_LSI_INVADER: 941 switch (instance->pdev->subsystem_device) { 942 case MEGARAID_INTEL_RS3DC080_SSDID: 943 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 944 instance->host->host_no, 945 MEGARAID_INTEL_RS3DC080_BRANDING); 946 break; 947 case MEGARAID_INTEL_RS3DC040_SSDID: 948 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 949 instance->host->host_no, 950 MEGARAID_INTEL_RS3DC040_BRANDING); 951 break; 952 case MEGARAID_INTEL_RS3SC008_SSDID: 953 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 954 instance->host->host_no, 955 MEGARAID_INTEL_RS3SC008_BRANDING); 956 break; 957 case MEGARAID_INTEL_RS3MC044_SSDID: 958 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 959 instance->host->host_no, 960 MEGARAID_INTEL_RS3MC044_BRANDING); 961 break; 962 default: 963 break; 964 } 965 break; 966 case PCI_DEVICE_ID_LSI_FURY: 967 switch (instance->pdev->subsystem_device) { 968 case MEGARAID_INTEL_RS3WC080_SSDID: 969 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 970 instance->host->host_no, 971 MEGARAID_INTEL_RS3WC080_BRANDING); 972 break; 973 case MEGARAID_INTEL_RS3WC040_SSDID: 974 dev_info(&instance->pdev->dev, "scsi host %d: %s\n", 975 instance->host->host_no, 976 MEGARAID_INTEL_RS3WC040_BRANDING); 977 break; 978 default: 979 break; 980 } 981 break; 982 default: 983 break; 984 } 985} 986 987/** 988 * megasas_init_adapter_fusion - Initializes the FW 989 * @instance: Adapter soft state 990 * 991 * This is the main function for initializing firmware. 992 */ 993u32 994megasas_init_adapter_fusion(struct megasas_instance *instance) 995{ 996 struct megasas_register_set __iomem *reg_set; 997 struct fusion_context *fusion; 998 u32 max_cmd; 999 int i = 0, count; 1000 1001 fusion = instance->ctrl_context; 1002 1003 reg_set = instance->reg_set; 1004 1005 /* 1006 * Get various operational parameters from status register 1007 */ 1008 instance->max_fw_cmds = 1009 instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF; 1010 instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008); 1011 1012 /* 1013 * Reduce the max supported cmds by 1. 
This is to ensure that the 1014 * reply_q_sz (1 more than the max cmd that driver may send) 1015 * does not exceed max cmds that the FW can support 1016 */ 1017 instance->max_fw_cmds = instance->max_fw_cmds-1; 1018 /* Only internal cmds (DCMD) need to have MFI frames */ 1019 instance->max_mfi_cmds = MEGASAS_INT_CMDS; 1020 1021 max_cmd = instance->max_fw_cmds; 1022 1023 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16); 1024 1025 fusion->request_alloc_sz = 1026 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; 1027 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) 1028 *(fusion->reply_q_depth); 1029 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + 1030 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * 1031 (max_cmd + 1)); /* Extra 1 for SMID 0 */ 1032 1033 fusion->max_sge_in_main_msg = 1034 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 1035 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16; 1036 1037 fusion->max_sge_in_chain = 1038 MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION); 1039 1040 instance->max_num_sge = rounddown_pow_of_two( 1041 fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2); 1042 1043 /* Used for pass thru MFI frame (DCMD) */ 1044 fusion->chain_offset_mfi_pthru = 1045 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16; 1046 1047 fusion->chain_offset_io_request = 1048 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 1049 sizeof(union MPI2_SGE_IO_UNION))/16; 1050 1051 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; 1052 for (i = 0 ; i < count; i++) 1053 fusion->last_reply_idx[i] = 0; 1054 1055 /* 1056 * Allocate memory for descriptors 1057 * Create a pool of commands 1058 */ 1059 if (megasas_alloc_cmds(instance)) 1060 goto fail_alloc_mfi_cmds; 1061 if (megasas_alloc_cmds_fusion(instance)) 1062 goto fail_alloc_cmds; 1063 1064 if (megasas_ioc_init_fusion(instance)) 1065 goto fail_ioc_init; 1066 1067 megasas_display_intel_branding(instance); 1068 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) { 1069 dev_err(&instance->pdev->dev, 1070 "Could not get controller info. 
Fail from %s %d\n", 1071 __func__, __LINE__); 1072 goto fail_ioc_init; 1073 } 1074 1075 instance->supportmax256vd = 1076 instance->ctrl_info->adapterOperations3.supportMaxExtLDs; 1077 /* Below is additional check to address future FW enhancement */ 1078 if (instance->ctrl_info->max_lds > 64) 1079 instance->supportmax256vd = 1; 1080 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS 1081 * MEGASAS_MAX_DEV_PER_CHANNEL; 1082 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS 1083 * MEGASAS_MAX_DEV_PER_CHANNEL; 1084 if (instance->supportmax256vd) { 1085 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; 1086 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 1087 } else { 1088 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES; 1089 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; 1090 } 1091 dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n" 1092 "Driver supports %d VDs %d PDs\n", 1093 instance->fw_supported_vd_count, 1094 instance->fw_supported_pd_count, 1095 instance->drv_supported_vd_count, 1096 instance->drv_supported_pd_count); 1097 1098 instance->flag_ieee = 1; 1099 fusion->fast_path_io = 0; 1100 1101 fusion->old_map_sz = 1102 sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * 1103 (instance->fw_supported_vd_count - 1)); 1104 fusion->new_map_sz = 1105 sizeof(struct MR_FW_RAID_MAP_EXT); 1106 fusion->drv_map_sz = 1107 sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) * 1108 (instance->drv_supported_vd_count - 1)); 1109 1110 fusion->drv_map_pages = get_order(fusion->drv_map_sz); 1111 for (i = 0; i < 2; i++) { 1112 fusion->ld_map[i] = NULL; 1113 fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL, 1114 fusion->drv_map_pages); 1115 if (!fusion->ld_drv_map[i]) { 1116 dev_err(&instance->pdev->dev, "Could not allocate " 1117 "memory for local map info for %d pages\n", 1118 fusion->drv_map_pages); 1119 if (i == 1) 1120 free_pages((ulong)fusion->ld_drv_map[0], 1121 fusion->drv_map_pages); 1122 goto fail_ioc_init; 1123 } 1124 } 1125 1126 fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz); 1127 1128 if (instance->supportmax256vd) 1129 fusion->current_map_sz = fusion->new_map_sz; 1130 else 1131 fusion->current_map_sz = fusion->old_map_sz; 1132 1133 1134 for (i = 0; i < 2; i++) { 1135 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, 1136 fusion->max_map_sz, 1137 &fusion->ld_map_phys[i], 1138 GFP_KERNEL); 1139 if (!fusion->ld_map[i]) { 1140 printk(KERN_ERR "megasas: Could not allocate memory " 1141 "for map info\n"); 1142 goto fail_map_info; 1143 } 1144 } 1145 1146 if (!megasas_get_map_info(instance)) 1147 megasas_sync_map_info(instance); 1148 1149 return 0; 1150 1151fail_map_info: 1152 if (i == 1) 1153 dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz, 1154 fusion->ld_map[0], fusion->ld_map_phys[0]); 1155fail_ioc_init: 1156 megasas_free_cmds_fusion(instance); 1157fail_alloc_cmds: 1158 megasas_free_cmds(instance); 1159fail_alloc_mfi_cmds: 1160 return 1; 1161} 1162 1163/** 1164 * megasas_fire_cmd_fusion - Sends command to the FW 1165 * @frame_phys_addr : Physical address of cmd 1166 * @frame_count : Number of frames for the command 1167 * @regs : MFI register set 1168 */ 1169void 1170megasas_fire_cmd_fusion(struct megasas_instance *instance, 1171 dma_addr_t req_desc_lo, 1172 u32 req_desc_hi, 1173 struct megasas_register_set __iomem *regs) 1174{ 1175#if defined(writeq) && defined(CONFIG_64BIT) 1176 u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo); 1177 1178 
writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port); 1179#else 1180 unsigned long flags; 1181 1182 spin_lock_irqsave(&instance->hba_lock, flags); 1183 1184 writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); 1185 writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); 1186 spin_unlock_irqrestore(&instance->hba_lock, flags); 1187#endif 1188} 1189 1190/** 1191 * map_cmd_status - Maps FW cmd status to OS cmd status 1192 * @cmd : Pointer to cmd 1193 * @status : status of cmd returned by FW 1194 * @ext_status : ext status of cmd returned by FW 1195 */ 1196 1197void 1198map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status) 1199{ 1200 1201 switch (status) { 1202 1203 case MFI_STAT_OK: 1204 cmd->scmd->result = DID_OK << 16; 1205 break; 1206 1207 case MFI_STAT_SCSI_IO_FAILED: 1208 case MFI_STAT_LD_INIT_IN_PROGRESS: 1209 cmd->scmd->result = (DID_ERROR << 16) | ext_status; 1210 break; 1211 1212 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1213 1214 cmd->scmd->result = (DID_OK << 16) | ext_status; 1215 if (ext_status == SAM_STAT_CHECK_CONDITION) { 1216 memset(cmd->scmd->sense_buffer, 0, 1217 SCSI_SENSE_BUFFERSIZE); 1218 memcpy(cmd->scmd->sense_buffer, cmd->sense, 1219 SCSI_SENSE_BUFFERSIZE); 1220 cmd->scmd->result |= DRIVER_SENSE << 24; 1221 } 1222 break; 1223 1224 case MFI_STAT_LD_OFFLINE: 1225 case MFI_STAT_DEVICE_NOT_FOUND: 1226 cmd->scmd->result = DID_BAD_TARGET << 16; 1227 break; 1228 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1229 cmd->scmd->result = DID_IMM_RETRY << 16; 1230 break; 1231 default: 1232 printk(KERN_DEBUG "megasas: FW status %#x\n", status); 1233 cmd->scmd->result = DID_ERROR << 16; 1234 break; 1235 } 1236} 1237 1238/** 1239 * megasas_make_sgl_fusion - Prepares 32-bit SGL 1240 * @instance: Adapter soft state 1241 * @scp: SCSI command from the mid-layer 1242 * @sgl_ptr: SGL to be filled in 1243 * @cmd: cmd we are working on 1244 * 1245 * If successful, this function returns the number of SG elements. 
1246 */ 1247static int 1248megasas_make_sgl_fusion(struct megasas_instance *instance, 1249 struct scsi_cmnd *scp, 1250 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 1251 struct megasas_cmd_fusion *cmd) 1252{ 1253 int i, sg_processed, sge_count; 1254 struct scatterlist *os_sgl; 1255 struct fusion_context *fusion; 1256 1257 fusion = instance->ctrl_context; 1258 1259 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1260 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1261 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; 1262 sgl_ptr_end += fusion->max_sge_in_main_msg - 1; 1263 sgl_ptr_end->Flags = 0; 1264 } 1265 1266 sge_count = scsi_dma_map(scp); 1267 1268 BUG_ON(sge_count < 0); 1269 1270 if (sge_count > instance->max_num_sge || !sge_count) 1271 return sge_count; 1272 1273 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1274 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); 1275 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); 1276 sgl_ptr->Flags = 0; 1277 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1278 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1279 if (i == sge_count - 1) 1280 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 1281 } 1282 sgl_ptr++; 1283 1284 sg_processed = i + 1; 1285 1286 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) && 1287 (sge_count > fusion->max_sge_in_main_msg)) { 1288 1289 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; 1290 if ((instance->pdev->device == 1291 PCI_DEVICE_ID_LSI_INVADER) || 1292 (instance->pdev->device == 1293 PCI_DEVICE_ID_LSI_FURY)) { 1294 if ((le16_to_cpu(cmd->io_request->IoFlags) & 1295 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != 1296 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1297 cmd->io_request->ChainOffset = 1298 fusion-> 1299 chain_offset_io_request; 1300 else 1301 cmd->io_request->ChainOffset = 0; 1302 } else 1303 cmd->io_request->ChainOffset = 1304 fusion->chain_offset_io_request; 1305 1306 sg_chain = sgl_ptr; 1307 /* Prepare chain element */ 1308 sg_chain->NextChainOffset = 0; 1309 if ((instance->pdev->device == 1310 PCI_DEVICE_ID_LSI_INVADER) || 1311 (instance->pdev->device == 1312 PCI_DEVICE_ID_LSI_FURY)) 1313 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; 1314 else 1315 sg_chain->Flags = 1316 (IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1317 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 1318 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); 1319 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); 1320 1321 sgl_ptr = 1322 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; 1323 } 1324 } 1325 1326 return sge_count; 1327} 1328 1329/** 1330 * megasas_set_pd_lba - Sets PD LBA 1331 * @cdb: CDB 1332 * @cdb_len: cdb length 1333 * @start_blk: Start block of IO 1334 * 1335 * Used to set the PD LBA in CDB for FP IOs 1336 */ 1337void 1338megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, 1339 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, 1340 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 1341{ 1342 struct MR_LD_RAID *raid; 1343 u32 ld; 1344 u64 start_blk = io_info->pdBlock; 1345 u8 *cdb = io_request->CDB.CDB32; 1346 u32 num_blocks = io_info->numBlocks; 1347 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0; 1348 1349 /* Check if T10 PI (DIF) is enabled for this LD */ 1350 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr); 1351 raid = MR_LdRaidGet(ld, local_map_ptr); 1352 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) { 1353 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1354 cdb[0] = 
MEGASAS_SCSI_VARIABLE_LENGTH_CMD; 1355 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN; 1356 1357 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1358 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32; 1359 else 1360 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32; 1361 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL; 1362 1363 /* LBA */ 1364 cdb[12] = (u8)((start_blk >> 56) & 0xff); 1365 cdb[13] = (u8)((start_blk >> 48) & 0xff); 1366 cdb[14] = (u8)((start_blk >> 40) & 0xff); 1367 cdb[15] = (u8)((start_blk >> 32) & 0xff); 1368 cdb[16] = (u8)((start_blk >> 24) & 0xff); 1369 cdb[17] = (u8)((start_blk >> 16) & 0xff); 1370 cdb[18] = (u8)((start_blk >> 8) & 0xff); 1371 cdb[19] = (u8)(start_blk & 0xff); 1372 1373 /* Logical block reference tag */ 1374 io_request->CDB.EEDP32.PrimaryReferenceTag = 1375 cpu_to_be32(ref_tag); 1376 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; 1377 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ 1378 1379 /* Transfer length */ 1380 cdb[28] = (u8)((num_blocks >> 24) & 0xff); 1381 cdb[29] = (u8)((num_blocks >> 16) & 0xff); 1382 cdb[30] = (u8)((num_blocks >> 8) & 0xff); 1383 cdb[31] = (u8)(num_blocks & 0xff); 1384 1385 /* set SCSI IO EEDPFlags */ 1386 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { 1387 io_request->EEDPFlags = cpu_to_le16( 1388 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1389 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1390 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | 1391 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | 1392 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1393 } else { 1394 io_request->EEDPFlags = cpu_to_le16( 1395 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1396 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); 1397 } 1398 io_request->Control |= cpu_to_le32((0x4 << 26)); 1399 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); 1400 } else { 1401 /* Some drives don't support 16/12 byte CDB's, convert to 10 */ 1402 if (((cdb_len == 12) || (cdb_len == 16)) && 1403 (start_blk <= 0xffffffff)) { 1404 if (cdb_len == 16) { 1405 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10; 1406 flagvals = cdb[1]; 1407 groupnum = cdb[14]; 1408 control = cdb[15]; 1409 } else { 1410 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10; 1411 flagvals = cdb[1]; 1412 groupnum = cdb[10]; 1413 control = cdb[11]; 1414 } 1415 1416 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1417 1418 cdb[0] = opcode; 1419 cdb[1] = flagvals; 1420 cdb[6] = groupnum; 1421 cdb[9] = control; 1422 1423 /* Transfer length */ 1424 cdb[8] = (u8)(num_blocks & 0xff); 1425 cdb[7] = (u8)((num_blocks >> 8) & 0xff); 1426 1427 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ 1428 cdb_len = 10; 1429 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { 1430 /* Convert to 16 byte CDB for large LBA's */ 1431 switch (cdb_len) { 1432 case 6: 1433 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16; 1434 control = cdb[5]; 1435 break; 1436 case 10: 1437 opcode = 1438 cdb[0] == READ_10 ? READ_16 : WRITE_16; 1439 flagvals = cdb[1]; 1440 groupnum = cdb[6]; 1441 control = cdb[9]; 1442 break; 1443 case 12: 1444 opcode = 1445 cdb[0] == READ_12 ? 
READ_16 : WRITE_16; 1446 flagvals = cdb[1]; 1447 groupnum = cdb[10]; 1448 control = cdb[11]; 1449 break; 1450 } 1451 1452 memset(cdb, 0, sizeof(io_request->CDB.CDB32)); 1453 1454 cdb[0] = opcode; 1455 cdb[1] = flagvals; 1456 cdb[14] = groupnum; 1457 cdb[15] = control; 1458 1459 /* Transfer length */ 1460 cdb[13] = (u8)(num_blocks & 0xff); 1461 cdb[12] = (u8)((num_blocks >> 8) & 0xff); 1462 cdb[11] = (u8)((num_blocks >> 16) & 0xff); 1463 cdb[10] = (u8)((num_blocks >> 24) & 0xff); 1464 1465 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ 1466 cdb_len = 16; 1467 } 1468 1469 /* Normal case, just load LBA here */ 1470 switch (cdb_len) { 1471 case 6: 1472 { 1473 u8 val = cdb[1] & 0xE0; 1474 cdb[3] = (u8)(start_blk & 0xff); 1475 cdb[2] = (u8)((start_blk >> 8) & 0xff); 1476 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f); 1477 break; 1478 } 1479 case 10: 1480 cdb[5] = (u8)(start_blk & 0xff); 1481 cdb[4] = (u8)((start_blk >> 8) & 0xff); 1482 cdb[3] = (u8)((start_blk >> 16) & 0xff); 1483 cdb[2] = (u8)((start_blk >> 24) & 0xff); 1484 break; 1485 case 12: 1486 cdb[5] = (u8)(start_blk & 0xff); 1487 cdb[4] = (u8)((start_blk >> 8) & 0xff); 1488 cdb[3] = (u8)((start_blk >> 16) & 0xff); 1489 cdb[2] = (u8)((start_blk >> 24) & 0xff); 1490 break; 1491 case 16: 1492 cdb[9] = (u8)(start_blk & 0xff); 1493 cdb[8] = (u8)((start_blk >> 8) & 0xff); 1494 cdb[7] = (u8)((start_blk >> 16) & 0xff); 1495 cdb[6] = (u8)((start_blk >> 24) & 0xff); 1496 cdb[5] = (u8)((start_blk >> 32) & 0xff); 1497 cdb[4] = (u8)((start_blk >> 40) & 0xff); 1498 cdb[3] = (u8)((start_blk >> 48) & 0xff); 1499 cdb[2] = (u8)((start_blk >> 56) & 0xff); 1500 break; 1501 } 1502 } 1503} 1504 1505/** 1506 * megasas_build_ldio_fusion - Prepares IOs to devices 1507 * @instance: Adapter soft state 1508 * @scp: SCSI command 1509 * @cmd: Command to be prepared 1510 * 1511 * Prepares the io_request and chain elements (sg_frame) for IO 1512 * The IO can be for PD (Fast Path) or LD 1513 */ 1514void 1515megasas_build_ldio_fusion(struct megasas_instance *instance, 1516 struct scsi_cmnd *scp, 1517 struct megasas_cmd_fusion *cmd) 1518{ 1519 u8 fp_possible; 1520 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; 1521 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1522 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1523 struct IO_REQUEST_INFO io_info; 1524 struct fusion_context *fusion; 1525 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1526 u8 *raidLUN; 1527 1528 device_id = MEGASAS_DEV_INDEX(instance, scp); 1529 1530 fusion = instance->ctrl_context; 1531 1532 io_request = cmd->io_request; 1533 io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); 1534 io_request->RaidContext.status = 0; 1535 io_request->RaidContext.exStatus = 0; 1536 1537 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; 1538 1539 start_lba_lo = 0; 1540 start_lba_hi = 0; 1541 fp_possible = 0; 1542 1543 /* 1544 * 6-byte READ(0x08) or WRITE(0x0A) cdb 1545 */ 1546 if (scp->cmd_len == 6) { 1547 datalength = (u32) scp->cmnd[4]; 1548 start_lba_lo = ((u32) scp->cmnd[1] << 16) | 1549 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; 1550 1551 start_lba_lo &= 0x1FFFFF; 1552 } 1553 1554 /* 1555 * 10-byte READ(0x28) or WRITE(0x2A) cdb 1556 */ 1557 else if (scp->cmd_len == 10) { 1558 datalength = (u32) scp->cmnd[8] | 1559 ((u32) scp->cmnd[7] << 8); 1560 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1561 ((u32) scp->cmnd[3] << 16) | 1562 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1563 } 1564 1565 /* 1566 * 12-byte READ(0xA8) or WRITE(0xAA) cdb 1567 */ 
1568 else if (scp->cmd_len == 12) { 1569 datalength = ((u32) scp->cmnd[6] << 24) | 1570 ((u32) scp->cmnd[7] << 16) | 1571 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1572 start_lba_lo = ((u32) scp->cmnd[2] << 24) | 1573 ((u32) scp->cmnd[3] << 16) | 1574 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1575 } 1576 1577 /* 1578 * 16-byte READ(0x88) or WRITE(0x8A) cdb 1579 */ 1580 else if (scp->cmd_len == 16) { 1581 datalength = ((u32) scp->cmnd[10] << 24) | 1582 ((u32) scp->cmnd[11] << 16) | 1583 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; 1584 start_lba_lo = ((u32) scp->cmnd[6] << 24) | 1585 ((u32) scp->cmnd[7] << 16) | 1586 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; 1587 1588 start_lba_hi = ((u32) scp->cmnd[2] << 24) | 1589 ((u32) scp->cmnd[3] << 16) | 1590 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; 1591 } 1592 1593 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 1594 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; 1595 io_info.numBlocks = datalength; 1596 io_info.ldTgtId = device_id; 1597 io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); 1598 1599 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1600 io_info.isRead = 1; 1601 1602 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1603 1604 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= 1605 instance->fw_supported_vd_count) || (!fusion->fast_path_io)) { 1606 io_request->RaidContext.regLockFlags = 0; 1607 fp_possible = 0; 1608 } else { 1609 if (MR_BuildRaidContext(instance, &io_info, 1610 &io_request->RaidContext, 1611 local_map_ptr, &raidLUN)) 1612 fp_possible = io_info.fpOkForIo; 1613 } 1614 1615 /* Use smp_processor_id() for now until cmd->request->cpu is CPU 1616 id by default, not CPU group id, otherwise all MSI-X queues won't 1617 be utilized */ 1618 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 
1619 smp_processor_id() % instance->msix_vectors : 0; 1620 1621 if (fp_possible) { 1622 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 1623 local_map_ptr, start_lba_lo); 1624 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1625 cmd->request_desc->SCSIIO.RequestFlags = 1626 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY 1627 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1628 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1629 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1630 if (io_request->RaidContext.regLockFlags == 1631 REGION_TYPE_UNUSED) 1632 cmd->request_desc->SCSIIO.RequestFlags = 1633 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1634 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1635 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 1636 io_request->RaidContext.nseg = 0x1; 1637 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1638 io_request->RaidContext.regLockFlags |= 1639 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 1640 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1641 } 1642 if ((fusion->load_balance_info[device_id].loadBalanceFlag) && 1643 (io_info.isRead)) { 1644 io_info.devHandle = 1645 get_updated_dev_handle(instance, 1646 &fusion->load_balance_info[device_id], 1647 &io_info); 1648 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 1649 cmd->pd_r1_lb = io_info.pd_after_lb; 1650 } else 1651 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 1652 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1653 io_request->DevHandle = io_info.devHandle; 1654 /* populate the LUN field */ 1655 memcpy(io_request->LUN, raidLUN, 8); 1656 } else { 1657 io_request->RaidContext.timeoutValue = 1658 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); 1659 cmd->request_desc->SCSIIO.RequestFlags = 1660 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 1661 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1662 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1663 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1664 if (io_request->RaidContext.regLockFlags == 1665 REGION_TYPE_UNUSED) 1666 cmd->request_desc->SCSIIO.RequestFlags = 1667 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1668 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1669 io_request->RaidContext.Type = MPI2_TYPE_CUDA; 1670 io_request->RaidContext.regLockFlags |= 1671 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 1672 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1673 io_request->RaidContext.nseg = 0x1; 1674 } 1675 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1676 io_request->DevHandle = cpu_to_le16(device_id); 1677 } /* Not FP */ 1678} 1679 1680/** 1681 * megasas_build_dcdb_fusion - Prepares IOs to devices 1682 * @instance: Adapter soft state 1683 * @scp: SCSI command 1684 * @cmd: Command to be prepared 1685 * 1686 * Prepares the io_request frame for non-io cmds 1687 */ 1688static void 1689megasas_build_dcdb_fusion(struct megasas_instance *instance, 1690 struct scsi_cmnd *scmd, 1691 struct megasas_cmd_fusion *cmd) 1692{ 1693 u32 device_id; 1694 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1695 u16 pd_index = 0; 1696 struct MR_DRV_RAID_MAP_ALL *local_map_ptr; 1697 struct fusion_context *fusion = instance->ctrl_context; 1698 u8 span, physArm; 1699 u16 devHandle; 1700 u32 ld, arRef, pd; 1701 struct MR_LD_RAID *raid; 1702 struct RAID_CONTEXT *pRAID_Context; 1703 1704 io_request = cmd->io_request; 1705 device_id = MEGASAS_DEV_INDEX(instance, scmd); 1706 pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) 1707 +scmd->device->id; 1708 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)]; 1709 1710 io_request->DataLength = 
cpu_to_le32(scsi_bufflen(scmd)); 1711 1712 1713 /* Check if this is a system PD I/O */ 1714 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && 1715 instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { 1716 io_request->Function = 0; 1717 if (fusion->fast_path_io) 1718 io_request->DevHandle = 1719 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1720 io_request->RaidContext.timeoutValue = 1721 local_map_ptr->raidMap.fpPdIoTimeoutSec; 1722 io_request->RaidContext.regLockFlags = 0; 1723 io_request->RaidContext.regLockRowLBA = 0; 1724 io_request->RaidContext.regLockLength = 0; 1725 io_request->RaidContext.RAIDFlags = 1726 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << 1727 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 1728 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1729 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 1730 io_request->IoFlags |= cpu_to_le16( 1731 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); 1732 cmd->request_desc->SCSIIO.RequestFlags = 1733 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1734 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1735 cmd->request_desc->SCSIIO.DevHandle = 1736 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1737 cmd->request_desc->SCSIIO.MSIxIndex = 1738 instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0; 1739 /* 1740 * If the command is for the tape device, set the 1741 * FP timeout to the os layer timeout value. 1742 */ 1743 if (scmd->device->type == TYPE_TAPE) { 1744 if ((scmd->request->timeout / HZ) > 0xFFFF) 1745 io_request->RaidContext.timeoutValue = 1746 0xFFFF; 1747 else 1748 io_request->RaidContext.timeoutValue = 1749 scmd->request->timeout / HZ; 1750 } 1751 } else { 1752 if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) 1753 goto NonFastPath; 1754 1755 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1756 if ((ld >= instance->fw_supported_vd_count) || 1757 (!fusion->fast_path_io)) 1758 goto NonFastPath; 1759 1760 raid = MR_LdRaidGet(ld, local_map_ptr); 1761 1762 /* check if this LD is FP capable */ 1763 if (!(raid->capability.fpNonRWCapable)) 1764 /* not FP capable, send as non-FP */ 1765 goto NonFastPath; 1766 1767 /* get RAID_Context pointer */ 1768 pRAID_Context = &io_request->RaidContext; 1769 1770 /* set RAID context values */ 1771 pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; 1772 pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; 1773 pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); 1774 pRAID_Context->regLockRowLBA = 0; 1775 pRAID_Context->regLockLength = 0; 1776 pRAID_Context->configSeqNum = raid->seqNum; 1777 1778 /* get the DevHandle for the PD (since this is 1779 fpNonRWCapable, this is a single disk RAID0) */ 1780 span = physArm = 0; 1781 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); 1782 pd = MR_ArPdGet(arRef, physArm, local_map_ptr); 1783 devHandle = MR_PdDevHandleGet(pd, local_map_ptr); 1784 1785 /* build request descriptor */ 1786 cmd->request_desc->SCSIIO.RequestFlags = 1787 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << 1788 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1789 cmd->request_desc->SCSIIO.DevHandle = devHandle; 1790 1791 /* populate the LUN field */ 1792 memcpy(io_request->LUN, raid->LUN, 8); 1793 1794 /* build the raidScsiIO structure */ 1795 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1796 io_request->DevHandle = devHandle; 1797 1798 return; 1799 1800NonFastPath: 1801 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; 1802 io_request->DevHandle = cpu_to_le16(device_id); 1803 cmd->request_desc->SCSIIO.RequestFlags = 1804 
/**
 * megasas_build_io_fusion - Prepares IOs to devices
 * @instance:		Adapter soft state
 * @scp:		SCSI command
 * @cmd:		Command to be prepared
 *
 * Invokes helper functions to prepare request frames
 * and sets flags appropriate for IO/Non-IO cmd
 */
int
megasas_build_io_fusion(struct megasas_instance *instance,
			struct scsi_cmnd *scp,
			struct megasas_cmd_fusion *cmd)
{
	u32 device_id, sge_count;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;

	device_id = MEGASAS_DEV_INDEX(instance, scp);

	/* Zero out some fields so they don't get reused */
	memset(io_request->LUN, 0x0, 8);
	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
	io_request->EEDPFlags = 0;
	io_request->Control = 0;
	io_request->EEDPBlockSize = 0;
	io_request->ChainOffset = 0;
	io_request->RaidContext.RAIDFlags = 0;
	io_request->RaidContext.Type = 0;
	io_request->RaidContext.nseg = 0;

	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
	/*
	 * Just the CDB length, rest of the Flags are zero
	 * This will be modified for FP in build_ldio_fusion
	 */
	io_request->IoFlags = cpu_to_le16(scp->cmd_len);

	if (megasas_is_ldio(scp))
		megasas_build_ldio_fusion(instance, scp, cmd);
	else
		megasas_build_dcdb_fusion(instance, scp, cmd);

	/*
	 * Construct SGL
	 */

	sge_count =
		megasas_make_sgl_fusion(instance, scp,
					(struct MPI25_IEEE_SGE_CHAIN64 *)
					&io_request->SGL, cmd);

	if (sge_count > instance->max_num_sge) {
		printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
		       "max (0x%x) allowed\n", sge_count,
		       instance->max_num_sge);
		return 1;
	}

	io_request->RaidContext.numSGE = sge_count;

	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	if (scp->sc_data_direction == PCI_DMA_TODEVICE)
		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
	else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);

	io_request->SGLOffset0 =
		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;

	io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;

	cmd->scmd = scp;
	scp->SCp.ptr = (char *)cmd;

	return 0;
}

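/*
 * Note: SMIDs are 1-based while the request descriptor array is
 * 0-based, which is why callers below pass (index - 1) to
 * megasas_get_request_descriptor() but program SMID with the
 * unmodified index.
 */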
union MEGASAS_REQUEST_DESCRIPTOR_UNION *
megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
{
	u8 *p;
	struct fusion_context *fusion;

	if (index >= instance->max_fw_cmds) {
		printk(KERN_ERR "megasas: Invalid SMID (0x%x) request for "
		       "descriptor for scsi%d\n", index,
		       instance->host->host_no);
		return NULL;
	}
	fusion = instance->ctrl_context;
	p = fusion->req_frames_desc
		+ sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;

	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
}

/**
 * megasas_build_and_issue_cmd_fusion - Main routine for building and
 *					issuing non IOCTL cmd
 * @instance:		Adapter soft state
 * @scmd:		pointer to scsi cmd from OS
 */
static u32
megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
				   struct scsi_cmnd *scmd)
{
	struct megasas_cmd_fusion *cmd;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 index;
	struct fusion_context *fusion;

	fusion = instance->ctrl_context;

	cmd = megasas_get_cmd_fusion(instance);
	if (!cmd)
		return SCSI_MLQUEUE_HOST_BUSY;

	index = cmd->index;

	req_desc = megasas_get_request_descriptor(instance, index - 1);
	if (!req_desc)
		return 1;

	req_desc->Words = 0;
	cmd->request_desc = req_desc;

	if (megasas_build_io_fusion(instance, scmd, cmd)) {
		megasas_return_cmd_fusion(instance, cmd);
		printk(KERN_ERR "megasas: Error building command.\n");
		cmd->request_desc = NULL;
		return 1;
	}

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cpu_to_le16(index);

	if (cmd->io_request->ChainOffset != 0 &&
	    cmd->io_request->ChainOffset != 0xF)
		printk(KERN_ERR "megasas: The chain offset value is not "
		       "correct : %x\n", cmd->io_request->ChainOffset);

	/*
	 * Issue the command to the FW
	 */
	atomic_inc(&instance->fw_outstanding);

	instance->instancet->fire_cmd(instance,
				      req_desc->u.low, req_desc->u.high,
				      instance->reg_set);

	return 0;
}

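/*
 * Note on reply processing: each MSI-x vector owns its own slice of
 * the reply descriptor area (reply_alloc_sz bytes per vector), and
 * fusion->last_reply_idx[] remembers where the driver stopped in each
 * slice.  A descriptor whose type is MPI2_RPY_DESCRIPT_FLAGS_UNUSED
 * marks the end of the new completions.
 */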
/**
 * complete_cmd_fusion - Completes command
 * @instance:		Adapter soft state
 * @MSIxIndex:		MSI-x vector whose reply queue is serviced
 *
 * Completes all commands that are in the reply descriptor queue
 */
int
complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
{
	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	struct megasas_cmd_fusion *cmd_fusion;
	u16 smid, num_completed;
	u8 reply_descript_type;
	u32 status, extStatus, device_id;
	union desc_value d_val;
	struct LD_LOAD_BALANCE_INFO *lbinfo;
	int threshold_reply_count = 0;

	fusion = instance->ctrl_context;

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
		return IRQ_HANDLED;

	desc = fusion->reply_frames_desc;
	desc += ((MSIxIndex * fusion->reply_alloc_sz) /
		 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
		fusion->last_reply_idx[MSIxIndex];

	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

	d_val.word = desc->Words;

	reply_descript_type = reply_desc->ReplyFlags &
		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
		return IRQ_NONE;

	num_completed = 0;

	while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) {
		smid = le16_to_cpu(reply_desc->SMID);

		cmd_fusion = fusion->cmd_list[smid - 1];

		scsi_io_req =
			(struct MPI2_RAID_SCSI_IO_REQUEST *)
			cmd_fusion->io_request;

		if (cmd_fusion->scmd)
			cmd_fusion->scmd->SCp.ptr = NULL;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
			/* Update load balancing info */
			device_id = MEGASAS_DEV_INDEX(instance,
						      cmd_fusion->scmd);
			lbinfo = &fusion->load_balance_info[device_id];
			if (cmd_fusion->scmd->SCp.Status &
			    MEGASAS_LOAD_BALANCE_FLAG) {
				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
				cmd_fusion->scmd->SCp.Status &=
					~MEGASAS_LOAD_BALANCE_FLAG;
			}
			if (reply_descript_type ==
			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
				if (megasas_dbg_lvl == 5)
					printk(KERN_ERR "\nmegasas: FAST Path "
					       "IO Success\n");
			}
			/* Fall thru and complete IO */
		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
			/* Map the FW Cmd Status */
			map_cmd_status(cmd_fusion, status, extStatus);
			scsi_dma_unmap(cmd_fusion->scmd);
			cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			megasas_return_cmd_fusion(instance, cmd_fusion);
			atomic_dec(&instance->fw_outstanding);

			break;
		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];

			if (!cmd_mfi->mpt_pthr_cmd_blocked) {
				if (megasas_dbg_lvl == 5)
					dev_info(&instance->pdev->dev,
						 "freeing mfi/mpt pass-through "
						 "from %s %d\n",
						 __func__, __LINE__);
				megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
							    cmd_fusion);
			}

			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
			cmd_fusion->flags = 0;
			break;
		}

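		/*
		 * Mark the descriptor as consumed (all ones is the "unused"
		 * pattern) and advance last_reply_idx, wrapping around at
		 * reply_q_depth.
		 */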
		fusion->last_reply_idx[MSIxIndex]++;
		if (fusion->last_reply_idx[MSIxIndex] >=
		    fusion->reply_q_depth)
			fusion->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ULLONG_MAX;
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!fusion->last_reply_idx[MSIxIndex])
			desc = fusion->reply_frames_desc +
				((MSIxIndex * fusion->reply_alloc_sz) /
				 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
		else
			desc++;

		reply_desc =
			(struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

		d_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags &
			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;
		/*
		 * Write to the reply post host index register after completing
		 * a threshold number of replies while more replies are still
		 * pending in the reply queue.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if ((instance->pdev->device ==
			     PCI_DEVICE_ID_LSI_INVADER) ||
			    (instance->pdev->device ==
			     PCI_DEVICE_ID_LSI_FURY))
				writel(((MSIxIndex & 0x7) << 24) |
				       fusion->last_reply_idx[MSIxIndex],
				       instance->reply_post_host_index_addr[MSIxIndex/8]);
			else
				writel((MSIxIndex << 24) |
				       fusion->last_reply_idx[MSIxIndex],
				       instance->reply_post_host_index_addr[0]);
			threshold_reply_count = 0;
		}
	}

	if (!num_completed)
		return IRQ_NONE;

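	/*
	 * Order the descriptor writes above before telling the HBA how far
	 * we have consumed the reply queue.  The doorbell value encodes the
	 * MSI-x index in bits 31:24 and the new index in the low bits;
	 * Invader/Fury expose one register per group of eight vectors.
	 */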
	wmb();
	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
		writel(((MSIxIndex & 0x7) << 24) |
		       fusion->last_reply_idx[MSIxIndex],
		       instance->reply_post_host_index_addr[MSIxIndex/8]);
	else
		writel((MSIxIndex << 24) |
		       fusion->last_reply_idx[MSIxIndex],
		       instance->reply_post_host_index_addr[0]);
	megasas_check_and_restore_queue_depth(instance);
	return IRQ_HANDLED;
}

/**
 * megasas_complete_cmd_dpc_fusion - Completes command
 * @instance:		Adapter soft state
 *
 * Tasklet to complete cmds
 */
void
megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
{
	struct megasas_instance *instance =
		(struct megasas_instance *)instance_addr;
	unsigned long flags;
	u32 count, MSIxIndex;

	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;

	/* If we have already declared adapter dead, do not complete cmds */
	spin_lock_irqsave(&instance->hba_lock, flags);
	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		spin_unlock_irqrestore(&instance->hba_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&instance->hba_lock, flags);

	for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
		complete_cmd_fusion(instance, MSIxIndex);
}

/**
 * megasas_isr_fusion - isr entry point
 * @irq:	IRQ number
 * @devp:	IRQ context (struct megasas_irq_context *)
 */
irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
	struct megasas_irq_context *irq_context = devp;
	struct megasas_instance *instance = irq_context->instance;
	u32 mfiStatus, fw_state, dma_state;

	if (instance->mask_interrupts)
		return IRQ_NONE;

	if (!instance->msix_vectors) {
		mfiStatus = instance->instancet->clear_intr(instance->reg_set);
		if (!mfiStatus)
			return IRQ_NONE;
	}

	/* If we are resetting, bail */
	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
		instance->instancet->clear_intr(instance->reg_set);
		return IRQ_HANDLED;
	}

	if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
		instance->instancet->clear_intr(instance->reg_set);
		/* If we didn't complete any commands, check for FW fault */
		fw_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_MASK;
		dma_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_DMADONE;
		if (instance->crash_dump_drv_support &&
		    instance->crash_dump_app_support) {
			/* Start collecting crash, if DMA bit is done */
			if ((fw_state == MFI_STATE_FAULT) && dma_state)
				schedule_work(&instance->crash_init);
			else if (fw_state == MFI_STATE_FAULT)
				schedule_work(&instance->work_init);
		} else if (fw_state == MFI_STATE_FAULT) {
			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt "
			       "for scsi%d\n", instance->host->host_no);
			schedule_work(&instance->work_init);
		}
	}

	return IRQ_HANDLED;
}

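/*
 * Note: MFI management commands are tunneled through the MPT interface
 * below by pointing a chain SGE at the DMA address of the original MFI
 * frame, so firmware fetches the MFI frame itself while the request is
 * tracked like any other fusion command.
 */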
/**
 * build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
 * @instance:		Adapter soft state
 * @mfi_cmd:		megasas_cmd pointer
 *
 */
u8
build_mpt_mfi_pass_thru(struct megasas_instance *instance,
			struct megasas_cmd *mfi_cmd)
{
	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
	struct megasas_cmd_fusion *cmd;
	struct fusion_context *fusion;
	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
	u32 opcode;

	cmd = megasas_get_cmd_fusion(instance);
	if (!cmd)
		return 1;

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->context.smid = cmd->index;
	cmd->sync_cmd_idx = mfi_cmd->index;

	/* Set this only for Blocked commands */
	opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
	if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
	    && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
		mfi_cmd->is_wait_event = 1;

	if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
		mfi_cmd->is_wait_event = 1;

	if (mfi_cmd->is_wait_event)
		mfi_cmd->mpt_pthr_cmd_blocked = cmd;

	/*
	 * For cmds where the flag is set, store the flag and check
	 * on completion. For cmds with this flag, don't call
	 * megasas_complete_cmd
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	fusion = instance->ctrl_context;
	io_req = cmd->io_request;

	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	mpi25_ieee_chain =
		(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
				      SGL) / 4;
	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;

	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME);

	return 0;
}

/**
 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
 * @instance:		Adapter soft state
 * @cmd:		mfi cmd to build
 *
 */
union MEGASAS_REQUEST_DESCRIPTOR_UNION *
build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u16 index;

	if (build_mpt_mfi_pass_thru(instance, cmd)) {
		printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
		return NULL;
	}

	index = cmd->context.smid;

	req_desc = megasas_get_request_descriptor(instance, index - 1);

	if (!req_desc)
		return NULL;

	req_desc->Words = 0;
	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	req_desc->SCSIIO.SMID = cpu_to_le16(index);

	return req_desc;
}

/**
 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
 * @instance:		Adapter soft state
 * @cmd:		mfi cmd pointer
 *
 */
void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
			  struct megasas_cmd *cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = build_mpt_cmd(instance, cmd);
	if (!req_desc) {
		printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
		return;
	}
	atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
	instance->instancet->fire_cmd(instance, req_desc->u.low,
				      req_desc->u.high, instance->reg_set);
}

/**
 * megasas_release_fusion - Reverses the FW initialization
 * @instance:		Adapter soft state
 */
void
megasas_release_fusion(struct megasas_instance *instance)
{
	megasas_free_cmds(instance);
	megasas_free_cmds_fusion(instance);

	iounmap(instance->reg_set);

	pci_release_selected_regions(instance->pdev, instance->bar);
}

/**
 * megasas_read_fw_status_reg_fusion - returns the current FW status value
 * @regs:		MFI register set
 */
static u32
megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
{
	return readl(&(regs)->outbound_scratch_pad);
}

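/*
 * Note: the firmware crash dump is staged through a single DMA buffer of
 * CRASH_DMA_BUF_SIZE bytes; the helpers below allocate matching host
 * buffers (one page-order chunk per copy, up to MAX_CRASH_DUMP_SIZE
 * chunks) so the dump can be preserved across the subsequent OCR.
 */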
/**
 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
 * @instance:		Controller's soft instance
 *
 * The number of buffers actually allocated is recorded in
 * instance->drv_buf_alloc.
 */
static void
megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
{
	unsigned int i;

	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
		instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
				instance->crash_buf_pages);
		if (!instance->crash_buf[i]) {
			dev_info(&instance->pdev->dev, "Firmware crash dump "
				 "memory allocation failed at index %d\n", i);
			break;
		}
	}
	instance->drv_buf_alloc = i;
}

/**
 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
 * @instance:		Controller's soft instance
 */
void
megasas_free_host_crash_buffer(struct megasas_instance *instance)
{
	unsigned int i;

	for (i = 0; i < instance->drv_buf_alloc; i++) {
		if (instance->crash_buf[i])
			free_pages((ulong)instance->crash_buf[i],
				   instance->crash_buf_pages);
	}
	instance->drv_buf_index = 0;
	instance->drv_buf_alloc = 0;
	instance->fw_crash_state = UNAVAILABLE;
	instance->fw_crash_buffer_size = 0;
}

/**
 * megasas_adp_reset_fusion - For controller reset
 * @regs:		MFI register set
 */
static int
megasas_adp_reset_fusion(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	return 0;
}

/**
 * megasas_check_reset_fusion - For controller reset check
 * @regs:		MFI register set
 */
static int
megasas_check_reset_fusion(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	return 0;
}

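/*
 * Note: the wait loop below polls fw_outstanding once per second for up
 * to resetwaittime seconds, bailing out early if firmware goes to FAULT
 * or, in SR-IOV VF mode, if the firmware heartbeat counter stops
 * advancing during an I/O timeout.
 */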
/* This function waits for outstanding commands on fusion to complete */
int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
					int iotimeout, int *convert)
{
	int i, outstanding, retval = 0, hb_seconds_missed = 0;
	u32 fw_state;

	for (i = 0; i < resetwaittime; i++) {
		/* Check if firmware is in fault state */
		fw_state = instance->instancet->read_fw_status_reg(
			instance->reg_set) & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			printk(KERN_WARNING "megasas: Found FW in FAULT state,"
			       " will reset adapter scsi%d.\n",
			       instance->host->host_no);
			retval = 1;
			goto out;
		}
		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
		if (instance->requestorId && !iotimeout) {
			retval = 1;
			goto out;
		}

		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
		if (instance->requestorId && iotimeout) {
			if (instance->hb_host_mem->HB.fwCounter !=
			    instance->hb_host_mem->HB.driverCounter) {
				instance->hb_host_mem->HB.driverCounter =
					instance->hb_host_mem->HB.fwCounter;
				hb_seconds_missed = 0;
			} else {
				hb_seconds_missed++;
				if (hb_seconds_missed ==
				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
					printk(KERN_WARNING "megasas: SR-IOV:"
					       " Heartbeat never completed"
					       " while polling during I/O"
					       " timeout handling for"
					       " scsi%d.\n",
					       instance->host->host_no);
					*convert = 1;
					retval = 1;
					goto out;
				}
			}
		}

		outstanding = atomic_read(&instance->fw_outstanding);
		if (!outstanding)
			goto out;

		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
			       "commands to complete for scsi%d\n", i,
			       outstanding, instance->host->host_no);
			megasas_complete_cmd_dpc_fusion(
				(unsigned long)instance);
		}
		msleep(1000);
	}

	if (atomic_read(&instance->fw_outstanding)) {
		printk(KERN_WARNING
		       "megaraid_sas: pending commands remain after waiting, "
		       "will reset adapter scsi%d.\n",
		       instance->host->host_no);
		retval = 1;
	}
out:
	return retval;
}

void megasas_reset_reply_desc(struct megasas_instance *instance)
{
	int i, count;
	struct fusion_context *fusion;
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	fusion = instance->ctrl_context;
	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
	for (i = 0; i < count; i++)
		fusion->last_reply_idx[i] = 0;
	reply_desc = fusion->reply_frames_desc;
	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
		reply_desc->Words = ULLONG_MAX;
}

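/*
 * Note: for SR-IOV VF multipath configurations the helper below lets a
 * timed-out command be returned with DID_NO_CONNECT (so the multipath
 * layer fails over to the second path) instead of DID_RESET when
 * another UP adapter exposes the same logical drive.
 */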
/* Check for a second path that is currently UP */
int megasas_check_mpio_paths(struct megasas_instance *instance,
			     struct scsi_cmnd *scmd)
{
	int i, j, retval = (DID_RESET << 16);

	if (instance->mpio && instance->requestorId) {
		for (i = 0; i < MAX_MGMT_ADAPTERS; i++)
			for (j = 0; j < MAX_LOGICAL_DRIVES; j++)
				if (megasas_mgmt_info.instance[i] &&
				    (megasas_mgmt_info.instance[i] != instance) &&
				    megasas_mgmt_info.instance[i]->mpio &&
				    megasas_mgmt_info.instance[i]->requestorId
				    &&
				    (megasas_mgmt_info.instance[i]->ld_ids[j]
				     == scmd->device->id)) {
					retval = (DID_NO_CONNECT << 16);
					goto out;
				}
	}
out:
	return retval;
}

/* Core fusion reset function */
int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
{
	int retval = SUCCESS, i, j, retry = 0, convert = 0;
	struct megasas_instance *instance;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion;
	struct megasas_cmd *cmd_mfi;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	u32 host_diag, abs_state, status_reg, reset_adapter;
	u32 io_timeout_in_crash_mode = 0;

	instance = (struct megasas_instance *)shost->hostdata;
	fusion = instance->ctrl_context;

	mutex_lock(&instance->reset_mutex);

	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
		       "returning FAILED for scsi%d.\n",
		       instance->host->host_no);
		mutex_unlock(&instance->reset_mutex);
		return FAILED;
	}
	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
	abs_state = status_reg & MFI_STATE_MASK;

	/* IO timeout detected, forcibly put FW in FAULT state */
	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
	    instance->crash_dump_app_support && iotimeout) {
		dev_info(&instance->pdev->dev, "IO timeout is detected, "
			 "forcibly FAULT Firmware\n");
		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
		status_reg = readl(&instance->reg_set->doorbell);
		writel(status_reg | MFI_STATE_FORCE_OCR,
		       &instance->reg_set->doorbell);
		readl(&instance->reg_set->doorbell);
		mutex_unlock(&instance->reset_mutex);
		do {
			ssleep(3);
			io_timeout_in_crash_mode++;
			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
				"seconds for crash dump collection and OCR "
				"to be done\n", (io_timeout_in_crash_mode * 3));
		} while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
			 (io_timeout_in_crash_mode < 80));

		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
			dev_info(&instance->pdev->dev, "OCR done for IO "
				 "timeout case\n");
			retval = SUCCESS;
		} else {
			dev_info(&instance->pdev->dev, "Controller is not "
				 "operational after 240 seconds wait for IO "
				 "timeout case in FW crash dump mode, do "
				 "OCR/kill adapter\n");
			retval = megasas_reset_fusion(shost, 0);
		}
		return retval;
	}

	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
		del_timer_sync(&instance->sriov_heartbeat_timer);
	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
	instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
	instance->instancet->disable_intr(instance);
	msleep(1000);

	/* First try waiting for commands to complete */
	if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
						&convert)) {
		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
		printk(KERN_WARNING "megaraid_sas: resetting fusion "
		       "adapter scsi%d.\n", instance->host->host_no);
		if (convert)
			iotimeout = 0;

		/* Now return commands back to the OS */
		for (i = 0; i < instance->max_fw_cmds; i++) {
			cmd_fusion = fusion->cmd_list[i];
			if (cmd_fusion->scmd) {
				scsi_dma_unmap(cmd_fusion->scmd);
				cmd_fusion->scmd->result =
					megasas_check_mpio_paths(instance,
								 cmd_fusion->scmd);
				cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);
				megasas_return_cmd_fusion(instance, cmd_fusion);
				atomic_dec(&instance->fw_outstanding);
			}
		}

		status_reg = instance->instancet->read_fw_status_reg(
			instance->reg_set);
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (instance->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			printk(KERN_WARNING "megaraid_sas: Reset not supported"
			       ", killing adapter scsi%d.\n",
			       instance->host->host_no);
			megaraid_sas_kill_hba(instance);
			instance->skip_heartbeat_timer_del = 1;
			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
			retval = FAILED;
			goto out;
		}

		/* Let SR-IOV VF & PF sync up if there was a HB failure */
		if (instance->requestorId && !iotimeout) {
			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
			/* Look for a late HB update after VF settle time */
			if (abs_state == MFI_STATE_OPERATIONAL &&
			    (instance->hb_host_mem->HB.fwCounter !=
			     instance->hb_host_mem->HB.driverCounter)) {
				instance->hb_host_mem->HB.driverCounter =
					instance->hb_host_mem->HB.fwCounter;
				printk(KERN_WARNING "megasas: SR-IOV: "
				       "Late FW heartbeat update for "
				       "scsi%d.\n",
				       instance->host->host_no);
			} else {
				/* In VF mode, first poll for FW ready */
				for (i = 0;
				     i < (MEGASAS_RESET_WAIT_TIME * 1000);
				     i += 20) {
					status_reg =
						instance->instancet->
						read_fw_status_reg(
							instance->reg_set);
					abs_state = status_reg &
						MFI_STATE_MASK;
					if (abs_state == MFI_STATE_READY) {
						printk(KERN_WARNING "megasas: "
						       "SR-IOV: FW was found "
						       "to be in ready state "
						       "for scsi%d.\n",
						       instance->host->host_no);
						break;
					}
					msleep(20);
				}
				if (abs_state != MFI_STATE_READY) {
					printk(KERN_WARNING "megasas: SR-IOV: "
					       "FW not in ready state after %d"
					       " seconds for scsi%d, status_reg = "
					       "0x%x.\n",
					       MEGASAS_RESET_WAIT_TIME,
					       instance->host->host_no,
					       status_reg);
					megaraid_sas_kill_hba(instance);
					instance->skip_heartbeat_timer_del = 1;
					instance->adprecovery =
						MEGASAS_HW_CRITICAL_ERROR;
					retval = FAILED;
					goto out;
				}
			}
		}

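		/*
		 * Note: the MPI2_WRSEQ_* magic keys written below unlock the
		 * host diagnostic register; the diag write-enable (DRWE) bit
		 * must read back as set before DIAG_RESET_ADAPTER is issued.
		 */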
		/* Now try to reset the chip */
		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
			writel(MPI2_WRSEQ_FLUSH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_1ST_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_2ND_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_3RD_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_4TH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_5TH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);
			writel(MPI2_WRSEQ_6TH_KEY_VALUE,
			       &instance->reg_set->fusion_seq_offset);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = readl(&instance->reg_set->fusion_host_diag);
			retry = 0;
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				msleep(100);
				host_diag =
					readl(&instance->reg_set->fusion_host_diag);
				if (retry++ == 100) {
					printk(KERN_WARNING "megaraid_sas: "
					       "Host diag unlock failed! "
					       "for scsi%d\n",
					       instance->host->host_no);
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			writel(host_diag | HOST_DIAG_RESET_ADAPTER,
			       &instance->reg_set->fusion_host_diag);
			msleep(3000);

			/* Make sure reset adapter bit is cleared */
			host_diag = readl(&instance->reg_set->fusion_host_diag);
			retry = 0;
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				msleep(100);
				host_diag =
					readl(&instance->reg_set->fusion_host_diag);
				if (retry++ == 1000) {
					printk(KERN_WARNING "megaraid_sas: "
					       "Diag reset adapter never "
					       "cleared for scsi%d!\n",
					       instance->host->host_no);
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			abs_state =
				instance->instancet->read_fw_status_reg(
					instance->reg_set) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) &&
			       (retry++ < 1000)) {
				msleep(100);
				abs_state =
					instance->instancet->read_fw_status_reg(
						instance->reg_set) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				printk(KERN_WARNING "megaraid_sas: firmware "
				       "state < MFI_STATE_FW_INIT, state = "
				       "0x%x for scsi%d\n", abs_state,
				       instance->host->host_no);
				continue;
			}

			/* Wait for FW to become ready */
			if (megasas_transition_to_ready(instance, 1)) {
				printk(KERN_WARNING "megaraid_sas: Failed to "
				       "transition controller to ready "
				       "for scsi%d.\n",
				       instance->host->host_no);
				continue;
			}

			megasas_reset_reply_desc(instance);
			if (megasas_ioc_init_fusion(instance)) {
				printk(KERN_WARNING "megaraid_sas: "
				       "megasas_ioc_init_fusion() failed!"
				       " for scsi%d\n",
				       instance->host->host_no);
				continue;
			}

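			/*
			 * Note: MFI commands that were outstanding across the
			 * reset (identified by a valid sync_cmd_idx) are
			 * re-fired below, except for MR_DCMD_LD_MAP_GET_INFO
			 * whose frames are simply returned to the pool.
			 */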
			/* Re-fire management commands */
			for (j = 0; j < instance->max_fw_cmds; j++) {
				cmd_fusion = fusion->cmd_list[j];
				if (cmd_fusion->sync_cmd_idx !=
				    (u32)ULONG_MAX) {
					cmd_mfi =
						instance->
						cmd_list[cmd_fusion->sync_cmd_idx];
					if (cmd_mfi->frame->dcmd.opcode ==
					    cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
						megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
					} else {
						req_desc =
							megasas_get_request_descriptor(
								instance,
								cmd_mfi->context.smid
								- 1);
						if (!req_desc) {
							printk(KERN_WARNING
							       "req_desc NULL"
							       " for scsi%d\n",
							       instance->host->host_no);
							/* Return leaked MPT
							   frame */
							megasas_return_cmd_fusion(instance, cmd_fusion);
						} else {
							instance->instancet->
								fire_cmd(instance,
									 req_desc->u.low,
									 req_desc->u.high,
									 instance->reg_set);
						}
					}
				}
			}

			clear_bit(MEGASAS_FUSION_IN_RESET,
				  &instance->reset_flags);
			instance->instancet->enable_intr(instance);
			instance->adprecovery = MEGASAS_HBA_OPERATIONAL;

			/* Reset load balance info */
			memset(fusion->load_balance_info, 0,
			       sizeof(struct LD_LOAD_BALANCE_INFO)
			       * MAX_LOGICAL_DRIVES_EXT);

			if (!megasas_get_map_info(instance))
				megasas_sync_map_info(instance);

			/* Restart SR-IOV heartbeat */
			if (instance->requestorId) {
				if (!megasas_sriov_start_heartbeat(instance, 0))
					megasas_start_timer(instance,
							    &instance->sriov_heartbeat_timer,
							    megasas_sriov_heartbeat_handler,
							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
				else
					instance->skip_heartbeat_timer_del = 1;
			}

			/* Adapter reset completed successfully */
			printk(KERN_WARNING "megaraid_sas: Reset "
			       "successful for scsi%d.\n",
			       instance->host->host_no);

			if (instance->crash_dump_drv_support) {
				if (instance->crash_dump_app_support)
					megasas_set_crash_dump_params(instance,
						MR_CRASH_BUF_TURN_ON);
				else
					megasas_set_crash_dump_params(instance,
						MR_CRASH_BUF_TURN_OFF);
			}
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
		       "adapter scsi%d.\n", instance->host->host_no);
		megaraid_sas_kill_hba(instance);
		instance->skip_heartbeat_timer_del = 1;
		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
		retval = FAILED;
	} else {
		/* For VF: Restart HB timer if we didn't OCR */
		if (instance->requestorId) {
			megasas_start_timer(instance,
					    &instance->sriov_heartbeat_timer,
					    megasas_sriov_heartbeat_handler,
					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
		}
		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
		instance->instancet->enable_intr(instance);
		instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
	}
out:
	clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
	mutex_unlock(&instance->reset_mutex);
	return retval;
}

/* Fusion Crash dump collection work queue */
void megasas_fusion_crash_dump_wq(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance, crash_init);
	u32 status_reg;
	u8 partial_copy = 0;

	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);

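	/*
	 * Note: the scratch pad status register doubles as the crash dump
	 * handshake: firmware signals chunk readiness through
	 * MFI_STATE_DMADONE and completion through MFI_STATE_CRASH_DUMP_DONE,
	 * and the driver acknowledges by writing the register back after
	 * copying (or deciding to skip) each chunk.
	 */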
	/*
	 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
	 * to host crash buffers
	 */
	if (instance->drv_buf_index == 0) {
		/* Buffer is already allocated for old Crash dump.
		 * Do OCR and do not wait for crash dump collection
		 */
		if (instance->drv_buf_alloc) {
			dev_info(&instance->pdev->dev, "earlier crash dump is "
				 "not yet copied by application, ignoring this "
				 "crash dump and initiating OCR\n");
			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
			writel(status_reg,
			       &instance->reg_set->outbound_scratch_pad);
			readl(&instance->reg_set->outbound_scratch_pad);
			return;
		}
		megasas_alloc_host_crash_buffer(instance);
		dev_info(&instance->pdev->dev, "Number of host crash buffers "
			 "allocated: %d\n", instance->drv_buf_alloc);
	}

	/*
	 * If the driver has already filled all the buffers it could allocate
	 * and FW still has more crash dump data, the remaining data is
	 * ignored.
	 */
	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
		dev_info(&instance->pdev->dev, "Driver is done copying "
			 "the buffer: %d\n", instance->drv_buf_alloc);
		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
		partial_copy = 1;
	} else {
		memcpy(instance->crash_buf[instance->drv_buf_index],
		       instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
		instance->drv_buf_index++;
		status_reg &= ~MFI_STATE_DMADONE;
	}

	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
			 "of copied buffers: %d\n", instance->drv_buf_index);
		instance->fw_crash_buffer_size = instance->drv_buf_index;
		instance->fw_crash_state = AVAILABLE;
		instance->drv_buf_index = 0;
		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
		readl(&instance->reg_set->outbound_scratch_pad);
		if (!partial_copy)
			megasas_reset_fusion(instance->host, 0);
	} else {
		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
		readl(&instance->reg_set->outbound_scratch_pad);
	}
}

/* Fusion OCR work queue */
void megasas_fusion_ocr_wq(struct work_struct *work)
{
	struct megasas_instance *instance =
		container_of(work, struct megasas_instance, work_init);

	megasas_reset_fusion(instance->host, 0);
}

struct megasas_instance_template megasas_instance_template_fusion = {
	.fire_cmd = megasas_fire_cmd_fusion,
	.enable_intr = megasas_enable_intr_fusion,
	.disable_intr = megasas_disable_intr_fusion,
	.clear_intr = megasas_clear_intr_fusion,
	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
	.adp_reset = megasas_adp_reset_fusion,
	.check_reset = megasas_check_reset_fusion,
	.service_isr = megasas_isr_fusion,
	.tasklet = megasas_complete_cmd_dpc_fusion,
	.init_adapter = megasas_init_adapter_fusion,
	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
	.issue_dcmd = megasas_issue_dcmd_fusion,
};