/******************************************************************************
*                    QLOGIC LINUX SOFTWARE
*
* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
* Copyright (C) 2003-2004 Christoph Hellwig
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* General Public License for more details.
*
******************************************************************************/
#define QLA1280_VERSION      "3.27.1"
/*****************************************************************************
    Revision History:
    Rev  3.27.1, February 8, 2010, Michael Reed
	- Retain firmware image for error recovery.
    Rev  3.27, February 10, 2009, Michael Reed
	- General code cleanup.
	- Improve error recovery.
    Rev  3.26, January 16, 2006 Jes Sorensen
	- Ditch all < 2.6 support
    Rev  3.25.1, February 10, 2005 Christoph Hellwig
	- use pci_map_single to map non-S/G requests
	- remove qla1280_proc_info
    Rev  3.25, September 28, 2004, Christoph Hellwig
	- add support for ISP1020/1040
	- don't include "scsi.h" anymore for 2.6.x
    Rev  3.24.4 June 7, 2004 Christoph Hellwig
	- restructure firmware loading, cleanup initialization code
	- prepare support for ISP1020/1040 chips
    Rev  3.24.3 January 19, 2004, Jes Sorensen
	- Handle PCI DMA mask settings correctly
	- Correct order of error handling in probe_one, free_irq should not
	  be called if request_irq failed
    Rev  3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
	- Big endian fixes (James)
	- Remove bogus IOCB content on zero data transfer commands (Andrew)
    Rev  3.24.1 January 5, 2004, Jes Sorensen
	- Initialize completion queue to avoid OOPS on probe
	- Handle interrupts during mailbox testing
    Rev  3.24 November 17, 2003, Christoph Hellwig
	- use struct list_head for completion queue
	- avoid old Scsi_FOO typedefs
	- cleanup 2.4 compat glue a bit
	- use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
	- make initialization for memory mapped vs port I/O more similar
	- remove broken pci config space manipulation
	- kill more cruft
	- this is an almost perfect 2.6 scsi driver now! ;)
    Rev  3.23.39 December 17, 2003, Jes Sorensen
	- Delete completion queue from srb if mailbox command failed,
	  to avoid qla1280_done completing qla1280_error_action's
	  obsolete context
	- Reduce arguments for qla1280_done
    Rev  3.23.38 October 18, 2003, Christoph Hellwig
	- Convert to new-style hotpluggable driver for 2.6
	- Fix missing scsi_unregister/scsi_host_put on HBA removal
	- Kill some more cruft
    Rev  3.23.37 October 1, 2003, Jes Sorensen
	- Make MMIO depend on CONFIG_X86_VISWS instead of yet another
	  random CONFIG option
	- Clean up locking in probe path
    Rev  3.23.36 October 1, 2003, Christoph Hellwig
	- queuecommand only ever receives new commands - clear flags
	- Reintegrate lost fixes from Linux 2.5
    Rev  3.23.35 August 14, 2003, Jes Sorensen
	- Build against 2.6
    Rev  3.23.34 July 23, 2003, Jes Sorensen
	- Remove pointless TRUE/FALSE macros
	- Clean up vchan handling
    Rev  3.23.33 July 3, 2003, Jes Sorensen
	- Don't define register access macros before define determining MMIO.
	  This just happened to work out on ia64 but not elsewhere.
	- Don't try and read from the card while it is in reset as
	  it won't respond and causes an MCA
    Rev  3.23.32 June 23, 2003, Jes Sorensen
	- Basic support for boot time arguments
    Rev  3.23.31 June 8, 2003, Jes Sorensen
	- Reduce boot time messages
    Rev  3.23.30 June 6, 2003, Jes Sorensen
	- Do not enable sync/wide/ppr before it has been determined
	  that the target device actually supports it
	- Enable DMA arbitration for multi channel controllers
    Rev  3.23.29 June 3, 2003, Jes Sorensen
	- Port to 2.5.69
    Rev  3.23.28 June 3, 2003, Jes Sorensen
	- Eliminate duplicate marker commands on bus resets
	- Handle outstanding commands appropriately on bus/device resets
    Rev  3.23.27 May 28, 2003, Jes Sorensen
	- Remove bogus input queue code, let the Linux SCSI layer do the work
	- Clean up NVRAM handling, only read it once from the card
	- Add a number of missing default nvram parameters
    Rev  3.23.26 Beta May 28, 2003, Jes Sorensen
	- Use completion queue for mailbox commands instead of busy wait
    Rev  3.23.25 Beta May 27, 2003, James Bottomley
	- Migrate to use new error handling code
    Rev  3.23.24 Beta May 21, 2003, James Bottomley
	- Big endian support
	- Cleanup data direction code
    Rev  3.23.23 Beta May 12, 2003, Jes Sorensen
	- Switch to using MMIO instead of PIO
    Rev  3.23.22 Beta April 15, 2003, Jes Sorensen
	- Fix PCI parity problem with 12160 during reset.
    Rev  3.23.21 Beta April 14, 2003, Jes Sorensen
	- Use pci_map_page()/pci_unmap_page() instead of map_single version.
    Rev  3.23.20 Beta April 9, 2003, Jes Sorensen
	- Remove < 2.4.x support
	- Introduce HOST_LOCK to make the spin lock changes portable.
	- Remove a bunch of idiotic and unnecessary typedefs
	- Kill all leftovers of target-mode support which never worked anyway
    Rev  3.23.19 Beta April 11, 2002, Linus Torvalds
	- Do qla1280_pci_config() before calling request_irq() and
	  request_region()
	- Use pci_dma_hi32() to handle upper word of DMA addresses instead
	  of large shifts
	- Hand correct arguments to free_irq() in case of failure
    Rev  3.23.18 Beta April 11, 2002, Jes Sorensen
	- Run source through Lindent and clean up the output
    Rev  3.23.17 Beta April 11, 2002, Jes Sorensen
	- Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
    Rev  3.23.16 Beta March 19, 2002, Jes Sorensen
	- Rely on mailbox commands generating interrupts - do not
	  run qla1280_isr() from ql1280_mailbox_command()
	- Remove device_reg_t
	- Integrate ql12160_set_target_parameters() with 1280 version
	- Make qla1280_setup() non static
	- Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
	  sent to the card - this command pauses the firmware!!!
    Rev  3.23.15 Beta March 19, 2002, Jes Sorensen
	- Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
	- Remove a pile of pointless and confusing (srb_t **) and
	  (scsi_lu_t *) typecasts
	- Explicitly mark that we do not use the new error handling (for now)
	- Remove scsi_qla_host_t and use 'struct' instead
	- Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
	  pci_64bit_slot flags which weren't used for anything anyway
	- Grab host->host_lock while calling qla1280_isr() from abort()
	- Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
	  do not need to save/restore flags in the interrupt handler
	- Enable interrupts early (before any mailbox access) in preparation
	  for cleaning up the mailbox handling
    Rev  3.23.14 Beta March 14, 2002, Jes Sorensen
	- Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
	  it with proper use of dprintk().
	- Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
	  a debug level argument to determine if data is to be printed
	- Add KERN_* info to printk()
    Rev  3.23.13 Beta March 14, 2002, Jes Sorensen
	- Significant cosmetic cleanups
	- Change debug code to use dprintk() and remove #if mess
    Rev  3.23.12 Beta March 13, 2002, Jes Sorensen
	- More cosmetic cleanups, fix places treating return as function
	- use cpu_relax() in qla1280_debounce_register()
    Rev  3.23.11 Beta March 13, 2002, Jes Sorensen
	- Make it compile under 2.5.5
    Rev  3.23.10 Beta October 1, 2001, Jes Sorensen
	- Do not typecast short * to long * in QL1280BoardTbl, this
	  broke miserably on big endian boxes
    Rev  3.23.9 Beta September 30, 2001, Jes Sorensen
	- Remove pre 2.2 hack for checking for reentrance in interrupt handler
	- Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
	  unsigned int to match the types from struct scsi_cmnd
    Rev  3.23.8 Beta September 29, 2001, Jes Sorensen
	- Remove bogus timer_t typedef from qla1280.h
	- Remove obsolete pre 2.2 PCI setup code, use proper #define's
	  for PCI_ values, call pci_set_master()
	- Fix memleak of qla1280_buffer on module unload
	- Only compile module parsing code #ifdef MODULE - should be
	  changed to use individual MODULE_PARM's later
	- Remove dummy_buffer that was never modified nor printed
	- ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
	  #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
	- Remove \r from print statements, this is Linux, not DOS
	- Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
	  dummy macros
	- Remove C++ compile hack in header file as Linux drivers are not
	  supposed to be compiled as C++
	- Kill MS_64BITS macro as it makes the code more readable
	- Remove unnecessary flags.in_interrupts bit
    Rev  3.23.7 Beta August 20, 2001, Jes Sorensen
	- Don't check for set flags on q->q_flag one by one in qla1280_next()
	- Check whether the interrupt was generated by the QLA1280 before
	  doing any processing
	- qla1280_status_entry(): Only zero out part of sense_buffer that
	  is not being copied into
	- Remove more superfluous typecasts
	- qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
    Rev  3.23.6 Beta August 20, 2001, Tony Luck, Intel
	- Don't walk the entire list in qla1280_putq_t() just to directly
	  grab the pointer to the last element afterwards
    Rev  3.23.5 Beta August 9, 2001, Jes Sorensen
	- Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
    Rev  3.23.4 Beta August 8, 2001, Jes Sorensen
	- Set dev->max_sectors to 1024
    Rev  3.23.3 Beta August 6, 2001, Jes Sorensen
	- Provide compat macros for pci_enable_device(), pci_find_subsys()
	  and scsi_set_pci_device()
	- Call scsi_set_pci_device() for all devices
	- Reduce size of kernel version dependent device probe code
	- Move duplicate probe/init code to separate function
	- Handle error if qla1280_mem_alloc() fails
	- Kill OFFSET() macro and use Linux's PCI definitions instead
	- Kill private structure defining PCI config space (struct config_reg)
	- Only allocate I/O port region if not in MMIO mode
	- Remove duplicate (unused) sanity check of size of srb_t
    Rev  3.23.2 Beta August 6, 2001, Jes Sorensen
	- Change home-brew memset() implementations to use memset()
	- Remove all references to COMTRACE() - accessing a PC's COM2 serial
	  port directly is not legal under Linux.
    Rev  3.23.1 Beta April 24, 2001, Jes Sorensen
	- Remove pre 2.2 kernel support
	- clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
	- Fix MMIO access to use readl/writel instead of directly
	  dereferencing pointers
	- Nuke MSDOS debugging code
	- Change true/false data types to int from uint8_t
	- Use int for counters instead of uint8_t etc.
	- Clean up size & byte order conversion macro usage
    Rev  3.23 Beta January 11, 2001 BN Qlogic
	- Added check of device_id when handling non
	  QLA12160s during detect().
    Rev  3.22 Beta January 5, 2001 BN Qlogic
	- Changed queue_task() to schedule_task()
	  for kernels 2.4.0 and higher.
	  Note: 2.4.0-testxx kernels released prior to
	  the actual 2.4.0 kernel release in January 2001
	  will get compile/link errors with schedule_task().
	  Please update your kernel to released 2.4.0 level,
	  or comment lines in this file flagged with 3.22
	  to resolve compile/link error of schedule_task().
	- Added -DCONFIG_SMP in addition to -D__SMP__
	  in Makefile for 2.4.0 builds of driver as module.
    Rev  3.21 Beta January 4, 2001 BN Qlogic
	- Changed criteria of 64/32 Bit mode of HBA
	  operation according to BITS_PER_LONG rather
	  than HBA's NVRAM setting of >4Gig memory bit;
	  so that the HBA auto-configures without the need
	  to setup each system individually.
    Rev  3.20 Beta December 5, 2000 BN Qlogic
	- Added priority handling to IA-64 onboard SCSI
	  ISP12160 chip for kernels greater than 2.3.18.
	- Added irqrestore for qla1280_intr_handler.
	- Enabled /proc/scsi/qla1280 interface.
	- Clear /proc/scsi/qla1280 counters in detect().
    Rev  3.19 Beta October 13, 2000 BN Qlogic
	- Declare driver_template for new kernel
	  (2.4.0 and greater) scsi initialization scheme.
	- Update /proc/scsi entry for 2.3.18 kernels and
	  above as qla1280
    Rev  3.18 Beta October 10, 2000 BN Qlogic
	- Changed scan order of adapters to map
	  the QLA12160 followed by the QLA1280.
    Rev  3.17 Beta September 18, 2000 BN Qlogic
	- Removed warnings for 32 bit 2.4.x compiles
	- Corrected declared size for request and response
	  DMA addresses that are kept in each ha
    Rev. 3.16 Beta August 25, 2000 BN Qlogic
	- Corrected 64 bit addressing issue on IA-64
	  where the upper 32 bits were not properly
	  passed to the RISC engine.
    Rev. 3.15 Beta August 22, 2000 BN Qlogic
	- Modified qla1280_setup_chip to properly load
	  ISP firmware for greater than 4 Gig memory on IA-64
    Rev. 3.14 Beta August 16, 2000 BN Qlogic
	- Added setting of dma_mask to full 64 bit
	  if flags.enable_64bit_addressing is set in NVRAM
    Rev. 3.13 Beta August 16, 2000 BN Qlogic
	- Use new PCI DMA mapping APIs for 2.4.x kernel
    Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
	- Added check of pci_enable_device to detect() for 2.3.x
	- Use pci_resource_start() instead of
	  pdev->resource[0].start in detect() for 2.3.x
	- Updated driver version
    Rev. 3.11 July 14, 2000 BN Qlogic
	- Updated SCSI Firmware to following versions:
	  qla1x80:  8.13.08
	  qla1x160: 10.04.08
	- Updated driver version to 3.11
    Rev. 3.10 June 23, 2000 BN Qlogic
	- Added filtering of AMI SubSys Vendor ID devices
    Rev. 3.9
	- DEBUG_QLA1280 undefined and new version BN Qlogic
    Rev. 3.08b May 9, 2000 MD Dell
	- Added logic to check against AMI subsystem vendor ID
    Rev. 3.08 May 4, 2000 DG Qlogic
	- Added logic to check for PCI subsystem ID.
    Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
	- Updated SCSI Firmware to following versions:
	  qla12160: 10.01.19
	  qla1280:   8.09.00
    Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
	- Internal revision; not released
    Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
	- Edit correction for virt_to_bus and PROC.
    Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
	- Merge changes from ia64 port.
    Rev. 3.03 Mar 28, 2000 BN Qlogic
	- Increase version to reflect new code drop with compile fix
	  of issue with inclusion of linux/spinlock for 2.3 kernels
    Rev. 3.02 Mar 15, 2000 BN Qlogic
	- Merge qla1280_proc_info from 2.10 code base
    Rev. 3.01 Feb 10, 2000 BN Qlogic
	- Corrected code to compile on a 2.2.x kernel.
    Rev. 3.00 Jan 17, 2000 DG Qlogic
	- Added 64-bit support.
    Rev. 2.07 Nov 9, 1999 DG Qlogic
	- Added new routine to set target parameters for ISP12160.
    Rev. 2.06 Sept 10, 1999 DG Qlogic
	- Added support for ISP12160 Ultra 3 chip.
    Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
	- Modified code to remove errors generated when compiling with
	  Cygnus IA64 Compiler.
	- Changed conversion of pointers to unsigned longs instead of integers.
	- Changed type of I/O port variables from uint32_t to unsigned long.
	- Modified OFFSET macro to work with 64-bit as well as 32-bit.
	- Changed sprintf and printk format specifiers for pointers to %p.
	- Changed some int to long type casts where needed in sprintf & printk.
	- Added l modifiers to sprintf and printk format specifiers for longs.
	- Removed unused local variables.
    Rev. 1.20 June 8, 1999 DG, Qlogic
	Changes to support RedHat release 6.0 (kernel 2.2.5).
	- Added SCSI exclusive access lock (io_request_lock) when accessing
	  the adapter.
	- Added changes for the new LINUX interface template. Some new error
	  handling routines have been added to the template, but for now we
	  will use the old ones.
	- Initial Beta Release.
*****************************************************************************/


#include <linux/module.h>

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/io.h>
#endif


/*
 * Compile time Options:
 *            0 - Disable and 1 - Enable
 */
#define  DEBUG_QLA1280_INTR	0
#define  DEBUG_PRINT_NVRAM	0
#define  DEBUG_QLA1280		0

#define	MEMORY_MAPPED_IO	1

#include "qla1280.h"

#ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!"
#endif
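/*
 * QLA_64BIT_PTR selects the 64-bit command path; pci_dma_hi32() and
 * pci_dma_lo32() simply split a dma_addr_t into the two 32-bit halves
 * expected by the ISP mailbox/IOCB formats, e.g. for
 * dma = 0x123456789abcdef0, pci_dma_hi32(dma) is 0x12345678 and
 * pci_dma_lo32(dma) is 0x9abcdef0.  On 32-bit builds without highmem
 * the high half is always 0.
 */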
#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
#define QLA_64BIT_PTR	1
#endif

#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a)			((a >> 16) >> 16)
#else
#define pci_dma_hi32(a)			0
#endif
#define pci_dma_lo32(a)			(a & 0xffffffff)

#define NVRAM_DELAY()			udelay(500)	/* 500 microseconds */

#if defined(__ia64__) && !defined(ia64_platform_is)
#define ia64_platform_is(foo)		(!strcmp(foo, platform_name))
#endif


#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)


static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

/*
 *  QLogic Driver Support Function Prototypes.
 */
static void qla1280_done(struct scsi_qla_host *);
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

/*
 *  QLogic ISP1280 Hardware Support Function Prototypes.
 */
static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
				   uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
#else
static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
#endif
static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
static void qla1280_poll(struct scsi_qla_host *);
static void qla1280_reset_adapter(struct scsi_qla_host *);
static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
static void qla1280_isp_cmd(struct scsi_qla_host *);
static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
static void qla1280_rst_aen(struct scsi_qla_host *);
static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
				 struct list_head *);
static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
				struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int  qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
					    unsigned int);
static void qla1280_get_target_parameters(struct scsi_qla_host *,
					  struct scsi_device *);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);


static struct qla_driver_setup driver_setup;

/*
 * convert scsi data direction to request_t control flags
 */
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch(cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
		return BIT_6;
	case DMA_BIDIRECTIONAL:
		return BIT_5 | BIT_6;
	/*
	 * We could BUG() on default here if one of the four cases isn't
	 * met, but then again if we receive something like that from the
	 * SCSI layer we have more serious problems. This shuts up GCC.
	 */
	case DMA_NONE:
	default:
		return 0;
	}
}

#if DEBUG_QLA1280
static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
static void __qla1280_dump_buffer(char *, int);
#endif


/*
 * insmod needs to find the variable and make it point to something
 */
#ifdef MODULE
static char *qla1280;

/* insmod qla1280 qla1280=verbose */
module_param(qla1280, charp, 0);
#else
__setup("qla1280=", qla1280_setup);
#endif


/*
 * We use the scsi_pointer structure that's included with each scsi_command
 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
 * bigger than a scsi_pointer.
 */

#define	CMD_SP(Cmnd)		&Cmnd->SCp
#define	CMD_CDBLEN(Cmnd)	Cmnd->cmd_len
#define	CMD_CDBP(Cmnd)		Cmnd->cmnd
#define	CMD_SNSP(Cmnd)		Cmnd->sense_buffer
#define	CMD_SNSLEN(Cmnd)	SCSI_SENSE_BUFFERSIZE
#define	CMD_RESULT(Cmnd)	Cmnd->result
#define	CMD_HANDLE(Cmnd)	Cmnd->host_scribble
#define CMD_REQUEST(Cmnd)	Cmnd->request->cmd

#define CMD_HOST(Cmnd)		Cmnd->device->host
#define SCSI_BUS_32(Cmnd)	Cmnd->device->channel
#define SCSI_TCN_32(Cmnd)	Cmnd->device->id
#define SCSI_LUN_32(Cmnd)	Cmnd->device->lun


/*****************************************/
/*   ISP Boards supported by this driver */
/*****************************************/

struct qla_boards {
	char *name;		/* Board ID String */
	int numPorts;		/* Number of SCSI ports */
	int fw_index;		/* index into qla1280_fw_tbl for firmware */
};

/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);

DEFINE_MUTEX(qla1280_firmware_mutex);

struct qla_fw {
	char *fwname;
	const struct firmware *fw;
};

#define QL_NUM_FW_IMAGES 3

struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin",  NULL},	/* image 0 */
	{"qlogic/1280.bin",  NULL},	/* image 1 */
	{"qlogic/12160.bin", NULL},	/* image 2 */
};

/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
static struct qla_boards ql1280_board_tbl[] = {
	{.name = "QLA12160", .numPorts = 2, .fw_index = 2},
	{.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
	{.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
	{.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
	{.name = "QLA10160", .numPorts = 1, .fw_index = 2},
	{.name = "        ", .numPorts = 0, .fw_index = -1},
};

static int qla1280_verbose = 1;

#if DEBUG_QLA1280
static int ql_debug_level = 1;
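/*
 * dprintk(level, ...) only prints when ql_debug_level >= level.  In this
 * file level 1 is used for major events, level 2 for failure paths and
 * levels 3/4 for the ENTER()/LEAVE() and interrupt-path trace macros, so
 * raising ql_debug_level gives progressively noisier output.
 */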
#define dprintk(level, format, a...)	\
	do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
#define qla1280_dump_buffer(level, buf, size)	\
	if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
#define qla1280_print_scsi_cmd(level, cmd)	\
	if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
#else
#define ql_debug_level			0
#define dprintk(level, format, a...)	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

#define ENTER(x)		dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)		dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)		dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)		dprintk(4, "qla1280 : Leaving %s()\n", x);


static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		chksum = 1;
	} else {
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);


	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	/* The firmware interface is, um, interesting, in that the
	 * actual firmware image on the chip is little endian, thus,
	 * the process of taking that image to the CPU would end up
	 * little endian.  However, the firmware interface requires it
	 * to be read a word (two bytes) at a time.
	 *
	 * The net result of this would be that the word (and
	 * doubleword) quantities in the firmware would be correct, but
	 * the bytes would be pairwise reversed.  Since most of the
	 * firmware quantities are, in fact, bytes, we do an extra
	 * le16_to_cpu() in the firmware read routine.
	 *
	 * The upshot of all this is that the bytes in the firmware
	 * are in the correct places, but the 16 and 32 bit quantities
	 * are still in little endian format.  We fix that up below by
	 * doing extra reverses on them */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for(i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}
	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}

/**************************************************************************
 * qla1280_info
 *   Return a string describing the driver.
 **************************************************************************/
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf (bp,
		 "QLogic %s PCI to SCSI Host Adapter\n"
		 "       Firmware version: %2d.%02d.%02d, Driver version %s",
		 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
		 QLA1280_VERSION);
	return bp;
}

/**************************************************************************
 * qla1280_queuecommand
 *   Queue a command to the controller.
 *
 * Note:
 * The mid-level driver tries to ensure that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (although the
 * interrupt handler may call this routine as part of request-completion
 * handling).   Unfortunately, it sometimes calls the scheduler in interrupt
 * context which is a big NO! NO!.
 **************************************************************************/
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/*
	 * Using 64 bit commands if the PCI bridge doesn't support it is a
	 * bit wasteful, however this should really only happen if one's
	 * PCI controller is completely broken, like the BCM1250. For
	 * sane hardware this is not an issue.
	 */
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}

static DEF_SCSI_QCMD(qla1280_queuecommand)

enum action {
	ABORT_COMMAND,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
};


static void qla1280_mailbox_timeout(unsigned long __data)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
	struct device_reg __iomem *reg;
	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}

static int
_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
				 struct completion *wait)
{
	int	status = FAILED;
	struct scsi_cmnd *cmd = sp->cmd;

	spin_unlock_irq(ha->host->host_lock);
	wait_for_completion_timeout(wait, 4*HZ);
	spin_lock_irq(ha->host->host_lock);
	sp->wait = NULL;
	if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
		status = SUCCESS;
		(*cmd->scsi_done)(cmd);
	}
	return status;
}

static int
qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	sp->wait = &wait;
	return _qla1280_wait_for_single_command(ha, sp, &wait);
}

static int
qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
{
	int		cnt;
	int		status;
	struct srb	*sp;
	struct scsi_cmnd *cmd;

	status = SUCCESS;

	/*
	 * Wait for all commands with the designated bus/target
	 * to be completed by the firmware
	 */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;

			if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
				continue;
			if (target >= 0 && SCSI_TCN_32(cmd) != target)
				continue;

			status = qla1280_wait_for_single_command(ha, sp);
			if (status == FAILED)
				break;
		}
	}
	return status;
}

/**************************************************************************
 * qla1280_error_action
 *    The function will attempt to perform a specified error action and
 *    wait for the results (or time out).
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            bus reset.
 *      action = error action to take (see enum action)
 *
 * Returns:
 *      SUCCESS or FAILED
 *
 **************************************************************************/
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	int i, found;
	int result=FAILED;
	int wait_for_bus=-1;
	int wait_for_target = -1;
	DECLARE_COMPLETION_ONSTACK(wait);

	ENTER("qla1280_error_action");

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
	sp = (struct srb *)CMD_SP(cmd);
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	/*
	 * Check to see if we have the command in the outstanding_cmds[]
	 * array. If not then it must have completed before this error
	 * action was initiated. If the error_action isn't ABORT_COMMAND
	 * then the driver must proceed with the requested action.
	 */
	found = -1;
	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if (sp == ha->outstanding_cmds[i]) {
			found = i;
			sp->wait = &wait; /* we'll wait for it to complete */
			break;
		}
	}

	if (found < 0) {	/* driver doesn't have command */
		result = SUCCESS;
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): specified command has "
			       "already completed.\n", ha->host_no, bus,
			       target, lun);
		}
	}

	switch (action) {

	case ABORT_COMMAND:
		dprintk(1, "qla1280: RISC aborting command\n");
		/*
		 * The abort might fail due to race when the host_lock
		 * is released to issue the abort. As such, we
		 * don't bother to check the return status.
		 */
		if (found >= 0)
			qla1280_abort_command(ha, sp, found);
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0) {
			/* issued device reset, set wait conditions */
			wait_for_bus = bus;
			wait_for_target = target;
		}
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
			       "reset.\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0) {
			/* issued bus reset, set wait conditions */
			wait_for_bus = bus;
		}
		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;

		if (qla1280_abort_isp(ha) != 0) {	/* it's dead */
			result = FAILED;
		}

		ha->flags.reset_active = 0;
	}

	/*
	 * At this point, the host_lock has been released and retaken
	 * by the issuance of the mailbox command.
	 * Wait for the command passed in by the mid-layer if it
	 * was found by the driver.  It might have been returned
It might have been returned 962 * between eh recovery steps, hence the check of the "found" 963 * variable. 964 */ 965 966 if (found >= 0) 967 result = _qla1280_wait_for_single_command(ha, sp, &wait); 968 969 if (action == ABORT_COMMAND && result != SUCCESS) { 970 printk(KERN_WARNING 971 "scsi(%li:%i:%i:%i): " 972 "Unable to abort command!\n", 973 ha->host_no, bus, target, lun); 974 } 975 976 /* 977 * If the command passed in by the mid-layer has been 978 * returned by the board, then wait for any additional 979 * commands which are supposed to complete based upon 980 * the error action. 981 * 982 * All commands are unconditionally returned during a 983 * call to qla1280_abort_isp(), ADAPTER_RESET. No need 984 * to wait for them. 985 */ 986 if (result == SUCCESS && wait_for_bus >= 0) { 987 result = qla1280_wait_for_pending_commands(ha, 988 wait_for_bus, wait_for_target); 989 } 990 991 dprintk(1, "RESET returning %d\n", result); 992 993 LEAVE("qla1280_error_action"); 994 return result; 995} 996 997/************************************************************************** 998 * qla1280_abort 999 * Abort the specified SCSI command(s). 1000 **************************************************************************/ 1001static int 1002qla1280_eh_abort(struct scsi_cmnd * cmd) 1003{ 1004 int rc; 1005 1006 spin_lock_irq(cmd->device->host->host_lock); 1007 rc = qla1280_error_action(cmd, ABORT_COMMAND); 1008 spin_unlock_irq(cmd->device->host->host_lock); 1009 1010 return rc; 1011} 1012 1013/************************************************************************** 1014 * qla1280_device_reset 1015 * Reset the specified SCSI device 1016 **************************************************************************/ 1017static int 1018qla1280_eh_device_reset(struct scsi_cmnd *cmd) 1019{ 1020 int rc; 1021 1022 spin_lock_irq(cmd->device->host->host_lock); 1023 rc = qla1280_error_action(cmd, DEVICE_RESET); 1024 spin_unlock_irq(cmd->device->host->host_lock); 1025 1026 return rc; 1027} 1028 1029/************************************************************************** 1030 * qla1280_bus_reset 1031 * Reset the specified bus. 
/**************************************************************************
 * qla1280_bus_reset
 *   Reset the specified bus.
 **************************************************************************/
static int
qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, BUS_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

/**************************************************************************
 * qla1280_adapter_reset
 *   Reset the specified adapter (both channels)
 **************************************************************************/
static int
qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = qla1280_error_action(cmd, ADAPTER_RESET);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

static int
qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
		  sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;

	heads = 64;
	sectors = 32;
	cylinders = (unsigned long)capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = (unsigned long)capacity / (heads * sectors);
		/* if (cylinders > 1023)
		   cylinders = 1023; */
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}


/* disable risc and host interrupts */
static inline void
qla1280_disable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, 0);
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}

/* enable risc and host interrupts */
static inline void
qla1280_enable_intrs(struct scsi_qla_host *ha)
{
	WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
	RD_REG_WORD(&ha->iobase->ictrl);	/* PCI Posted Write flush */
}

/**************************************************************************
 * qla1280_intr_handler
 *   Handles the H/W interrupt
 **************************************************************************/
static irqreturn_t
qla1280_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	struct device_reg __iomem *reg;
	u16 data;
	int handled = 0;

	ENTER_INTR ("qla1280_intr_handler");
	ha = (struct scsi_qla_host *)dev_id;

	spin_lock(ha->host->host_lock);

	ha->isr_count++;
	reg = ha->iobase;

	qla1280_disable_intrs(ha);

	data = qla1280_debounce_register(&reg->istatus);
	/* Check for pending interrupts. */
	if (data & RISC_INT) {
		qla1280_isr(ha, &ha->done_q);
		handled = 1;
	}
	if (!list_empty(&ha->done_q))
		qla1280_done(ha);

	spin_unlock(ha->host->host_lock);

	qla1280_enable_intrs(ha);

	LEAVE_INTR("qla1280_intr_handler");
	return IRQ_RETVAL(handled);
}


static int
qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
{
	uint8_t mr;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct nvram *nv;
	int status, lun;

	nv = &ha->nvram;

	mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;

	/* Set Target Parameters. */
	mb[0] = MBC_SET_TARGET_PARAMETERS;
	mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
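	/*
	 * The per-target NVRAM option bits are packed into the high byte
	 * of mb[2] (renegotiate_on_error in bit 8 up to disconnect_allowed
	 * in bit 15); mb[3] carries the sync offset in its high byte and
	 * the sync period in its low byte.
	 */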
	mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
	mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
	mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
	mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
	mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
	mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
	mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;

	if (IS_ISP1x160(ha)) {
		mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
		mb[3] =	(nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
		mb[6] =	(nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
			 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
		mr |= BIT_6;
	} else {
		mb[3] =	(nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
	}
	mb[3] |= nv->bus[bus].target[target].sync_period;

	status = qla1280_mailbox_command(ha, mr, mb);

	/* Set Device Queue Parameters. */
	for (lun = 0; lun < MAX_LUNS; lun++) {
		mb[0] = MBC_SET_DEVICE_QUEUE;
		mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
		mb[1] |= lun;
		mb[2] = nv->bus[bus].max_queue_depth;
		mb[3] = nv->bus[bus].target[target].execution_throttle;
		status |= qla1280_mailbox_command(ha, 0x0f, mb);
	}

	if (status)
		printk(KERN_WARNING "scsi(%ld:%i:%i): "
		       "qla1280_set_target_parameters() failed\n",
		       ha->host_no, bus, target);
	return status;
}


/**************************************************************************
 * qla1280_slave_configure
 *
 * Description:
 *   Determines the queue depth for a given device.  If the device
 *   supports tagged queueing and tags are enabled for its target in
 *   the per-bus settings, the queue depth is set to the bus high-water
 *   mark; otherwise a small default depth is used.
 **************************************************************************/
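/*
 * Note that the boot/module options parsed into driver_setup can veto
 * what the device and NVRAM advertise: the no_sync/no_wide/no_ppr flags
 * and the corresponding per-target bit masks below clear
 * enable_sync/enable_wide/enable_ppr before the settings are sent to
 * the firmware.
 */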
static int
qla1280_slave_configure(struct scsi_device *device)
{
	struct scsi_qla_host *ha;
	int default_depth = 3;
	int bus = device->channel;
	int target = device->id;
	int status = 0;
	struct nvram *nv;
	unsigned long flags;

	ha = (struct scsi_qla_host *)device->host->hostdata;
	nv = &ha->nvram;

	if (qla1280_check_for_dead_scsi_bus(ha, bus))
		return 1;

	if (device->tagged_supported &&
	    (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
		scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
					ha->bus_settings[bus].hiwat);
	} else {
		scsi_adjust_queue_depth(device, 0, default_depth);
	}

	nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
	nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
	nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;

	if (driver_setup.no_sync ||
	    (driver_setup.sync_mask &&
	     (~driver_setup.sync_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_sync = 0;
	if (driver_setup.no_wide ||
	    (driver_setup.wide_mask &&
	     (~driver_setup.wide_mask & (1 << target))))
		nv->bus[bus].target[target].parameter.enable_wide = 0;
	if (IS_ISP1x160(ha)) {
		if (driver_setup.no_ppr ||
		    (driver_setup.ppr_mask &&
		     (~driver_setup.ppr_mask & (1 << target))))
			nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
	}

	spin_lock_irqsave(ha->host->host_lock, flags);
	if (nv->bus[bus].target[target].parameter.enable_sync)
		status = qla1280_set_target_parameters(ha, bus, target);
	qla1280_get_target_parameters(ha, device);
	spin_unlock_irqrestore(ha->host->host_lock, flags);
	return status;
}


/*
 * qla1280_done
 *      Process completed commands.
 *
 * Input:
 *      ha = adapter block pointer.
 */
static void
qla1280_done(struct scsi_qla_host *ha)
{
	struct srb *sp;
	struct list_head *done_q;
	int bus, target, lun;
	struct scsi_cmnd *cmd;

	ENTER("qla1280_done");

	done_q = &ha->done_q;

	while (!list_empty(done_q)) {
		sp = list_entry(done_q->next, struct srb, list);

		list_del(&sp->list);

		cmd = sp->cmd;
		bus = SCSI_BUS_32(cmd);
		target = SCSI_TCN_32(cmd);
		lun = SCSI_LUN_32(cmd);

		switch ((CMD_RESULT(cmd) >> 16)) {
		case DID_RESET:
			/* Issue marker command. */
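			/*
			 * A marker IOCB lets the ISP firmware resume
			 * processing commands for this bus/target after
			 * the reset; it is skipped while a full ISP
			 * abort/reinit is already in progress.
			 */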
			if (!ha->flags.abort_isp_active)
				qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
			break;
		case DID_ABORT:
			sp->flags &= ~SRB_ABORT_PENDING;
			sp->flags |= SRB_ABORTED;
			break;
		default:
			break;
		}

		/* Release memory used for this I/O */
		scsi_dma_unmap(cmd);

		/* Call the mid-level driver interrupt handler */
		ha->actthreads--;

		if (sp->wait == NULL)
			(*(cmd)->scsi_done)(cmd);
		else
			complete(sp->wait);
	}
	LEAVE("qla1280_done");
}

/*
 * Translates an ISP error to a Linux SCSI error
 */
static int
qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
{
	int host_status = DID_ERROR;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);
	uint16_t state_flags = le16_to_cpu(sts->state_flags);
	uint32_t residual_length = le32_to_cpu(sts->residual_length);
	uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
#if DEBUG_QLA1280_INTR
	static char *reason[] = {
		"DID_OK",
		"DID_NO_CONNECT",
		"DID_BUS_BUSY",
		"DID_TIME_OUT",
		"DID_BAD_TARGET",
		"DID_ABORT",
		"DID_PARITY",
		"DID_ERROR",
		"DID_RESET",
		"DID_BAD_INTR"
	};
#endif				/* DEBUG_QLA1280_INTR */

	ENTER("qla1280_return_status");

#if DEBUG_QLA1280_INTR
	/*
	  dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
	  comp_status);
	*/
#endif

	switch (comp_status) {
	case CS_COMPLETE:
		host_status = DID_OK;
		break;

	case CS_INCOMPLETE:
		if (!(state_flags & SF_GOT_BUS))
			host_status = DID_NO_CONNECT;
		else if (!(state_flags & SF_GOT_TARGET))
			host_status = DID_BAD_TARGET;
		else if (!(state_flags & SF_SENT_CDB))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_TRANSFERRED_DATA))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_STATUS))
			host_status = DID_ERROR;
		else if (!(state_flags & SF_GOT_SENSE))
			host_status = DID_ERROR;
		break;

	case CS_RESET:
		host_status = DID_RESET;
		break;

	case CS_ABORTED:
		host_status = DID_ABORT;
		break;

	case CS_TIMEOUT:
		host_status = DID_TIME_OUT;
		break;

	case CS_DATA_OVERRUN:
		dprintk(2, "Data overrun 0x%x\n", residual_length);
		dprintk(2, "qla1280_return_status: response packet data\n");
		qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
		host_status = DID_ERROR;
		break;

	case CS_DATA_UNDERRUN:
		if ((scsi_bufflen(cp) - residual_length) <
		    cp->underflow) {
			printk(KERN_WARNING
			       "scsi: Underflow detected - retrying "
			       "command.\n");
			host_status = DID_ERROR;
		} else {
			scsi_set_resid(cp, residual_length);
			host_status = DID_OK;
		}
		break;

	default:
		host_status = DID_ERROR;
		break;
	}

#if DEBUG_QLA1280_INTR
	dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
		reason[host_status], scsi_status);
#endif

	LEAVE("qla1280_return_status");

	return (scsi_status & 0xff) | (host_status << 16);
}

/****************************************************************************/
/*                QLogic ISP1280 Hardware Support Functions.               */
/****************************************************************************/

/*
 * qla1280_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
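/*
 * Rough initialization sequence, as implemented below: clear the adapter
 * flags, read the NVRAM, load and start the RISC firmware, apply the
 * NVRAM configuration, initialize the request/response rings and finally
 * reset each SCSI bus (a bus that fails two resets is marked dead).
 */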
static int
qla1280_initialize_adapter(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg;
	int status;
	int bus;
	unsigned long flags;

	ENTER("qla1280_initialize_adapter");

	/* Clear adapter flags. */
	ha->flags.online = 0;
	ha->flags.disable_host_adapter = 0;
	ha->flags.reset_active = 0;
	ha->flags.abort_isp_active = 0;

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
	if (ia64_platform_is("sn2")) {
		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
		       "dual channel lockup workaround\n", ha->host_no);
		ha->flags.use_pci_vchannel = 1;
		driver_setup.no_nvram = 1;
	}
#endif

	/* TODO: implement support for the 1040 nvram format */
	if (IS_ISP1040(ha))
		driver_setup.no_nvram = 1;

	dprintk(1, "Configure PCI space for adapter...\n");

	reg = ha->iobase;

	/* Ensure mailbox registers are free. */
	WRT_REG_WORD(&reg->semaphore, 0);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
	RD_REG_WORD(&reg->host_cmd);

	if (qla1280_read_nvram(ha)) {
		dprintk(2, "qla1280_initialize_adapter: failed to read "
			"NVRAM\n");
	}

	/*
	 * It's necessary to grab the spin here as qla1280_mailbox_command
	 * needs to be able to drop the lock unconditionally to wait
	 * for completion.
	 */
	spin_lock_irqsave(ha->host->host_lock, flags);

	status = qla1280_load_firmware(ha);
	if (status) {
		printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
		       ha->host_no);
		goto out;
	}

	/* Setup adapter based on NVRAM parameters. */
	dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
	qla1280_nvram_config(ha);

	if (ha->flags.disable_host_adapter) {
		status = 1;
		goto out;
	}

	status = qla1280_init_rings(ha);
	if (status)
		goto out;

	/* Issue SCSI reset, if we can't reset twice then bus is dead */
	for (bus = 0; bus < ha->ports; bus++) {
		if (!ha->bus_settings[bus].disable_scsi_reset &&
		    qla1280_bus_reset(ha, bus) &&
		    qla1280_bus_reset(ha, bus))
			ha->bus_settings[bus].scsi_bus_dead = 1;
	}

	ha->flags.online = 1;
 out:
	spin_unlock_irqrestore(ha->host->host_lock, flags);

	if (status)
		dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");

	LEAVE("qla1280_initialize_adapter");
	return status;
}

/*
 * qla1280_request_firmware
 *      Acquire firmware for chip.  Retain in memory
 *      for error recovery.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      Pointer to firmware image or an error code
 *      cast to pointer via ERR_PTR().
 */
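/*
 * The caller holds ha->host->host_lock, which has to be dropped here
 * because request_firmware() can sleep.  Loaded images are cached in
 * qla1280_fw_tbl under qla1280_firmware_mutex, so a later error recovery
 * (or another adapter needing the same image) does not have to go back
 * to the filesystem.
 */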
static const struct firmware *
qla1280_request_firmware(struct scsi_qla_host *ha)
{
	const struct firmware *fw;
	int err;
	int index;
	char *fwname;

	spin_unlock_irq(ha->host->host_lock);
	mutex_lock(&qla1280_firmware_mutex);

	index = ql1280_board_tbl[ha->devnum].fw_index;
	fw = qla1280_fw_tbl[index].fw;
	if (fw)
		goto out;

	fwname = qla1280_fw_tbl[index].fwname;
	err = request_firmware(&fw, fwname, &ha->pdev->dev);

	if (err) {
		printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
		       fwname, err);
		fw = ERR_PTR(err);
		goto unlock;
	}
	if ((fw->size % 2) || (fw->size < 6)) {
		printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
		       fw->size, fwname);
		release_firmware(fw);
		fw = ERR_PTR(-EINVAL);
		goto unlock;
	}

	qla1280_fw_tbl[index].fw = fw;

 out:
	ha->fwver1 = fw->data[0];
	ha->fwver2 = fw->data[1];
	ha->fwver3 = fw->data[2];
 unlock:
	mutex_unlock(&qla1280_firmware_mutex);
	spin_lock_irq(ha->host->host_lock);
	return fw;
}

/*
 * Chip diagnostics
 *      Test chip for proper operation.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success.
 */
static int
qla1280_chip_diag(struct scsi_qla_host *ha)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct device_reg __iomem *reg = ha->iobase;
	int status = 0;
	int cnt;
	uint16_t data;
	dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);

	dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);

	/* Soft reset chip and wait for it to finish. */
	WRT_REG_WORD(&reg->ictrl, ISP_RESET);

	/*
	 * We can't do a traditional PCI write flush here by reading
	 * back the register. The card will not respond once the reset
	 * is in action and we end up with a machine check exception
	 * instead. Nothing to do but wait and hope for the best.
	 * A portable pci_write_flush(pdev) call would be very useful here.
	 */
	udelay(20);
	data = qla1280_debounce_register(&reg->ictrl);
	/*
	 * Yet another QLogic gem ;-(
	 */
	for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ictrl);
	}

	if (!cnt)
		goto fail;

	/* Reset register cleared by chip reset. */
	dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");

	WRT_REG_WORD(&reg->cfg_1, 0);

	/* Reset RISC and disable BIOS which
	   allows RISC to execute out of RAM. */
	WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
		     HC_RELEASE_RISC | HC_DISABLE_BIOS);

	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
	data = qla1280_debounce_register(&reg->mailbox0);

	/*
	 * I *LOVE* this code!
	 */
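	/*
	 * Poll mailbox0 until the RISC stops reporting MBS_BUSY after
	 * being released from reset; only then do mailbox1-4 hold the
	 * product ID values checked below.
	 */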
	for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->mailbox0);
	}

	if (!cnt)
		goto fail;

	/* Check product ID of chip */
	dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");

	if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
	    (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
	     RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
	    RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
	    RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
		printk(KERN_INFO "qla1280: Wrong product ID = "
		       "0x%x,0x%x,0x%x,0x%x\n",
		       RD_REG_WORD(&reg->mailbox1),
		       RD_REG_WORD(&reg->mailbox2),
		       RD_REG_WORD(&reg->mailbox3),
		       RD_REG_WORD(&reg->mailbox4));
		goto fail;
	}

	/*
	 * Enable ints early!!!
	 */
	qla1280_enable_intrs(ha);

	dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
	/* Wrap Incoming Mailboxes Test. */
	mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mb[1] = 0xAAAA;
	mb[2] = 0x5555;
	mb[3] = 0xAA55;
	mb[4] = 0x55AA;
	mb[5] = 0xA5A5;
	mb[6] = 0x5A5A;
	mb[7] = 0x2525;

	status = qla1280_mailbox_command(ha, 0xff, mb);
	if (status)
		goto fail;

	if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
	    mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
	    mb[7] != 0x2525) {
		printk(KERN_INFO "qla1280: Failed mbox check\n");
		goto fail;
	}

	dprintk(3, "qla1280_chip_diag: exiting normally\n");
	return 0;
 fail:
	dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
	return status;
}

static int
qla1280_load_firmware_pio(struct scsi_qla_host *ha)
{
	/* enter with host_lock acquired */

	const struct firmware *fw;
	const __le16 *fw_data;
	uint16_t risc_address, risc_code_size;
	uint16_t mb[MAILBOX_REGISTER_COUNT], i;
	int err = 0;

	fw = qla1280_request_firmware(ha);
	if (IS_ERR(fw))
		return PTR_ERR(fw);

	fw_data = (const __le16 *)&fw->data[0];
	ha->fwstart = __le16_to_cpu(fw_data[2]);

	/* Load RISC code. */
	risc_address = ha->fwstart;
	fw_data = (const __le16 *)&fw->data[6];
	risc_code_size = (fw->size - 6) / 2;

	for (i = 0; i < risc_code_size; i++) {
		mb[0] = MBC_WRITE_RAM_WORD;
		mb[1] = risc_address + i;
		mb[2] = __le16_to_cpu(fw_data[i]);

		err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
		if (err) {
			printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
			       ha->host_no);
			break;
		}
	}

	return err;
}

#define DUMP_IT_BACK 0		/* for debug of RISC loading */
static int
qla1280_load_firmware_dma(struct scsi_qla_host *ha)
{
	/* enter with host_lock acquired */
	const struct firmware *fw;
	const __le16 *fw_data;
	uint16_t risc_address, risc_code_size;
	uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
	int err = 0, num, i;
#if DUMP_IT_BACK
	uint8_t *sp, *tbuf;
	dma_addr_t p_tbuf;

	tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
	if (!tbuf)
		return -ENOMEM;
#endif

	fw = qla1280_request_firmware(ha);
	if (IS_ERR(fw))
		return PTR_ERR(fw);

	fw_data = (const __le16 *)&fw->data[0];
	ha->fwstart = __le16_to_cpu(fw_data[2]);

	/* Load RISC code. */
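	/*
	 * The image is transferred in chunks of at most 1000 words: each
	 * chunk is copied into the (still unused) request ring buffer and
	 * handed to the chip with MBC_LOAD_RAM, with the 64-bit DMA
	 * address split across mb[3]/mb[2] (low dword) and mb[7]/mb[6]
	 * (high dword).
	 */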
*/ 1767 risc_address = ha->fwstart; 1768 fw_data = (const __le16 *)&fw->data[6]; 1769 risc_code_size = (fw->size - 6) / 2; 1770 1771 dprintk(1, "%s: DMA RISC code (%i) words\n", 1772 __func__, risc_code_size); 1773 1774 num = 0; 1775 while (risc_code_size > 0) { 1776 int warn __attribute__((unused)) = 0; 1777 1778 cnt = 2000 >> 1; 1779 1780 if (cnt > risc_code_size) 1781 cnt = risc_code_size; 1782 1783 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p)," 1784 "%d,%d(0x%x)\n", 1785 fw_data, cnt, num, risc_address); 1786 for(i = 0; i < cnt; i++) 1787 ((__le16 *)ha->request_ring)[i] = fw_data[i]; 1788 1789 mb[0] = MBC_LOAD_RAM; 1790 mb[1] = risc_address; 1791 mb[4] = cnt; 1792 mb[3] = ha->request_dma & 0xffff; 1793 mb[2] = (ha->request_dma >> 16) & 0xffff; 1794 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1795 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1796 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1797 __func__, mb[0], 1798 (void *)(long)ha->request_dma, 1799 mb[6], mb[7], mb[2], mb[3]); 1800 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1801 BIT_1 | BIT_0, mb); 1802 if (err) { 1803 printk(KERN_ERR "scsi(%li): Failed to load partial " 1804 "segment of f\n", ha->host_no); 1805 goto out; 1806 } 1807 1808#if DUMP_IT_BACK 1809 mb[0] = MBC_DUMP_RAM; 1810 mb[1] = risc_address; 1811 mb[4] = cnt; 1812 mb[3] = p_tbuf & 0xffff; 1813 mb[2] = (p_tbuf >> 16) & 0xffff; 1814 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff; 1815 mb[6] = pci_dma_hi32(p_tbuf) >> 16; 1816 1817 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1818 BIT_1 | BIT_0, mb); 1819 if (err) { 1820 printk(KERN_ERR 1821 "Failed to dump partial segment of f/w\n"); 1822 goto out; 1823 } 1824 sp = (uint8_t *)ha->request_ring; 1825 for (i = 0; i < (cnt << 1); i++) { 1826 if (tbuf[i] != sp[i] && warn++ < 10) { 1827 printk(KERN_ERR "%s: FW compare error @ " 1828 "byte(0x%x) loop#=%x\n", 1829 __func__, i, num); 1830 printk(KERN_ERR "%s: FWbyte=%x " 1831 "FWfromChip=%x\n", 1832 __func__, sp[i], tbuf[i]); 1833 /*break; */ 1834 } 1835 } 1836#endif 1837 risc_address += cnt; 1838 risc_code_size = risc_code_size - cnt; 1839 fw_data = fw_data + cnt; 1840 num++; 1841 } 1842 1843 out: 1844#if DUMP_IT_BACK 1845 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf); 1846#endif 1847 return err; 1848} 1849 1850static int 1851qla1280_start_firmware(struct scsi_qla_host *ha) 1852{ 1853 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1854 int err; 1855 1856 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 1857 __func__); 1858 1859 /* Verify checksum of loaded RISC code. */ 1860 mb[0] = MBC_VERIFY_CHECKSUM; 1861 /* mb[1] = ql12_risc_code_addr01; */ 1862 mb[1] = ha->fwstart; 1863 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 1864 if (err) { 1865 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no); 1866 return err; 1867 } 1868 1869 /* Start firmware execution. 
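	 * MBC_EXECUTE_FIRMWARE is given the same start address the checksum
	 * was just verified against; once it completes, the remaining setup
	 * (NVRAM configuration, ring initialization) is done via mailbox
	 * commands to the running firmware.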
*/ 1870 dprintk(1, "%s: start firmware running.\n", __func__); 1871 mb[0] = MBC_EXECUTE_FIRMWARE; 1872 mb[1] = ha->fwstart; 1873 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 1874 if (err) { 1875 printk(KERN_ERR "scsi(%li): Failed to start firmware\n", 1876 ha->host_no); 1877 } 1878 1879 return err; 1880} 1881 1882static int 1883qla1280_load_firmware(struct scsi_qla_host *ha) 1884{ 1885 /* enter with host_lock taken */ 1886 int err; 1887 1888 err = qla1280_chip_diag(ha); 1889 if (err) 1890 goto out; 1891 if (IS_ISP1040(ha)) 1892 err = qla1280_load_firmware_pio(ha); 1893 else 1894 err = qla1280_load_firmware_dma(ha); 1895 if (err) 1896 goto out; 1897 err = qla1280_start_firmware(ha); 1898 out: 1899 return err; 1900} 1901 1902/* 1903 * Initialize rings 1904 * 1905 * Input: 1906 * ha = adapter block pointer. 1907 * ha->request_ring = request ring virtual address 1908 * ha->response_ring = response ring virtual address 1909 * ha->request_dma = request ring physical address 1910 * ha->response_dma = response ring physical address 1911 * 1912 * Returns: 1913 * 0 = success. 1914 */ 1915static int 1916qla1280_init_rings(struct scsi_qla_host *ha) 1917{ 1918 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1919 int status = 0; 1920 1921 ENTER("qla1280_init_rings"); 1922 1923 /* Clear outstanding commands array. */ 1924 memset(ha->outstanding_cmds, 0, 1925 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS); 1926 1927 /* Initialize request queue. */ 1928 ha->request_ring_ptr = ha->request_ring; 1929 ha->req_ring_index = 0; 1930 ha->req_q_cnt = REQUEST_ENTRY_CNT; 1931 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */ 1932 mb[0] = MBC_INIT_REQUEST_QUEUE_A64; 1933 mb[1] = REQUEST_ENTRY_CNT; 1934 mb[3] = ha->request_dma & 0xffff; 1935 mb[2] = (ha->request_dma >> 16) & 0xffff; 1936 mb[4] = 0; 1937 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1938 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1939 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 | 1940 BIT_3 | BIT_2 | BIT_1 | BIT_0, 1941 &mb[0]))) { 1942 /* Initialize response queue. 
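	 * Programmed the same way as the request queue above: entry count in
	 * mb[1], low 32 bits of the ring's DMA address in mb[2]/mb[3], high
	 * 32 bits in mb[6]/mb[7].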
*/ 1943 ha->response_ring_ptr = ha->response_ring; 1944 ha->rsp_ring_index = 0; 1945 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */ 1946 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64; 1947 mb[1] = RESPONSE_ENTRY_CNT; 1948 mb[3] = ha->response_dma & 0xffff; 1949 mb[2] = (ha->response_dma >> 16) & 0xffff; 1950 mb[5] = 0; 1951 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff; 1952 mb[6] = pci_dma_hi32(ha->response_dma) >> 16; 1953 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 | 1954 BIT_3 | BIT_2 | BIT_1 | BIT_0, 1955 &mb[0]); 1956 } 1957 1958 if (status) 1959 dprintk(2, "qla1280_init_rings: **** FAILED ****\n"); 1960 1961 LEAVE("qla1280_init_rings"); 1962 return status; 1963} 1964 1965static void 1966qla1280_print_settings(struct nvram *nv) 1967{ 1968 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n", 1969 nv->bus[0].config_1.initiator_id); 1970 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n", 1971 nv->bus[1].config_1.initiator_id); 1972 1973 dprintk(1, "qla1280 : bus reset delay[0]=%d\n", 1974 nv->bus[0].bus_reset_delay); 1975 dprintk(1, "qla1280 : bus reset delay[1]=%d\n", 1976 nv->bus[1].bus_reset_delay); 1977 1978 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count); 1979 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay); 1980 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count); 1981 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay); 1982 1983 dprintk(1, "qla1280 : async data setup time[0]=%d\n", 1984 nv->bus[0].config_2.async_data_setup_time); 1985 dprintk(1, "qla1280 : async data setup time[1]=%d\n", 1986 nv->bus[1].config_2.async_data_setup_time); 1987 1988 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n", 1989 nv->bus[0].config_2.req_ack_active_negation); 1990 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n", 1991 nv->bus[1].config_2.req_ack_active_negation); 1992 1993 dprintk(1, "qla1280 : data line active negation[0]=%d\n", 1994 nv->bus[0].config_2.data_line_active_negation); 1995 dprintk(1, "qla1280 : data line active negation[1]=%d\n", 1996 nv->bus[1].config_2.data_line_active_negation); 1997 1998 dprintk(1, "qla1280 : disable loading risc code=%d\n", 1999 nv->cntr_flags_1.disable_loading_risc_code); 2000 2001 dprintk(1, "qla1280 : enable 64bit addressing=%d\n", 2002 nv->cntr_flags_1.enable_64bit_addressing); 2003 2004 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n", 2005 nv->bus[0].selection_timeout); 2006 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n", 2007 nv->bus[1].selection_timeout); 2008 2009 dprintk(1, "qla1280 : max queue depth[0]=%d\n", 2010 nv->bus[0].max_queue_depth); 2011 dprintk(1, "qla1280 : max queue depth[1]=%d\n", 2012 nv->bus[1].max_queue_depth); 2013} 2014 2015static void 2016qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target) 2017{ 2018 struct nvram *nv = &ha->nvram; 2019 2020 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1; 2021 nv->bus[bus].target[target].parameter.auto_request_sense = 1; 2022 nv->bus[bus].target[target].parameter.tag_queuing = 1; 2023 nv->bus[bus].target[target].parameter.enable_sync = 1; 2024#if 1 /* Some SCSI Processors do not seem to like this */ 2025 nv->bus[bus].target[target].parameter.enable_wide = 1; 2026#endif 2027 nv->bus[bus].target[target].execution_throttle = 2028 nv->bus[bus].max_queue_depth - 1; 2029 nv->bus[bus].target[target].parameter.parity_checking = 1; 2030 nv->bus[bus].target[target].parameter.disconnect_allowed = 1; 2031 2032 if (IS_ISP1x160(ha)) { 2033 
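		/*
		 * Ultra3-capable (1x160) parts: enable PPR and use a larger
		 * sync offset and a faster sync period than the 1x80
		 * defaults in the else-branch below.
		 */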
nv->bus[bus].target[target].flags.flags1x160.device_enable = 1; 2034 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e; 2035 nv->bus[bus].target[target].sync_period = 9; 2036 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1; 2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2; 2038 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1; 2039 } else { 2040 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1; 2041 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12; 2042 nv->bus[bus].target[target].sync_period = 10; 2043 } 2044} 2045 2046static void 2047qla1280_set_defaults(struct scsi_qla_host *ha) 2048{ 2049 struct nvram *nv = &ha->nvram; 2050 int bus, target; 2051 2052 dprintk(1, "Using defaults for NVRAM: \n"); 2053 memset(nv, 0, sizeof(struct nvram)); 2054 2055 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */ 2056 nv->firmware_feature.f.enable_fast_posting = 1; 2057 nv->firmware_feature.f.disable_synchronous_backoff = 1; 2058 nv->termination.scsi_bus_0_control = 3; 2059 nv->termination.scsi_bus_1_control = 3; 2060 nv->termination.auto_term_support = 1; 2061 2062 /* 2063 * Set default FIFO magic - What appropriate values would be here 2064 * is unknown. This is what I have found testing with 12160s. 2065 * 2066 * Now, I would love the magic decoder ring for this one, the 2067 * header file provided by QLogic seems to be bogus or incomplete 2068 * at best. 2069 */ 2070 nv->isp_config.burst_enable = 1; 2071 if (IS_ISP1040(ha)) 2072 nv->isp_config.fifo_threshold |= 3; 2073 else 2074 nv->isp_config.fifo_threshold |= 4; 2075 2076 if (IS_ISP1x160(ha)) 2077 nv->isp_parameter = 0x01; /* fast memory enable */ 2078 2079 for (bus = 0; bus < MAX_BUSES; bus++) { 2080 nv->bus[bus].config_1.initiator_id = 7; 2081 nv->bus[bus].config_2.req_ack_active_negation = 1; 2082 nv->bus[bus].config_2.data_line_active_negation = 1; 2083 nv->bus[bus].selection_timeout = 250; 2084 nv->bus[bus].max_queue_depth = 32; 2085 2086 if (IS_ISP1040(ha)) { 2087 nv->bus[bus].bus_reset_delay = 3; 2088 nv->bus[bus].config_2.async_data_setup_time = 6; 2089 nv->bus[bus].retry_delay = 1; 2090 } else { 2091 nv->bus[bus].bus_reset_delay = 5; 2092 nv->bus[bus].config_2.async_data_setup_time = 8; 2093 } 2094 2095 for (target = 0; target < MAX_TARGETS; target++) 2096 qla1280_set_target_defaults(ha, bus, target); 2097 } 2098} 2099 2100static int 2101qla1280_config_target(struct scsi_qla_host *ha, int bus, int target) 2102{ 2103 struct nvram *nv = &ha->nvram; 2104 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2105 int status, lun; 2106 uint16_t flag; 2107 2108 /* Set Target Parameters. */ 2109 mb[0] = MBC_SET_TARGET_PARAMETERS; 2110 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2111 2112 /* 2113 * Do not enable sync and ppr for the initial INQUIRY run. We 2114 * enable this later if we determine the target actually 2115 * supports it. 2116 */ 2117 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE 2118 | TP_WIDE | TP_PARITY | TP_DISCONNECT); 2119 2120 if (IS_ISP1x160(ha)) 2121 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8; 2122 else 2123 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8; 2124 mb[3] |= nv->bus[bus].target[target].sync_period; 2125 status = qla1280_mailbox_command(ha, 0x0f, mb); 2126 2127 /* Save Tag queuing enable flag. 
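	 * qtag_enables, device_enables and lun_disables are per-bus bitmasks
	 * with one bit per target ID.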
*/ 2128 flag = (BIT_0 << target); 2129 if (nv->bus[bus].target[target].parameter.tag_queuing) 2130 ha->bus_settings[bus].qtag_enables |= flag; 2131 2132 /* Save Device enable flag. */ 2133 if (IS_ISP1x160(ha)) { 2134 if (nv->bus[bus].target[target].flags.flags1x160.device_enable) 2135 ha->bus_settings[bus].device_enables |= flag; 2136 ha->bus_settings[bus].lun_disables |= 0; 2137 } else { 2138 if (nv->bus[bus].target[target].flags.flags1x80.device_enable) 2139 ha->bus_settings[bus].device_enables |= flag; 2140 /* Save LUN disable flag. */ 2141 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable) 2142 ha->bus_settings[bus].lun_disables |= flag; 2143 } 2144 2145 /* Set Device Queue Parameters. */ 2146 for (lun = 0; lun < MAX_LUNS; lun++) { 2147 mb[0] = MBC_SET_DEVICE_QUEUE; 2148 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8); 2149 mb[1] |= lun; 2150 mb[2] = nv->bus[bus].max_queue_depth; 2151 mb[3] = nv->bus[bus].target[target].execution_throttle; 2152 status |= qla1280_mailbox_command(ha, 0x0f, mb); 2153 } 2154 2155 return status; 2156} 2157 2158static int 2159qla1280_config_bus(struct scsi_qla_host *ha, int bus) 2160{ 2161 struct nvram *nv = &ha->nvram; 2162 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2163 int target, status; 2164 2165 /* SCSI Reset Disable. */ 2166 ha->bus_settings[bus].disable_scsi_reset = 2167 nv->bus[bus].config_1.scsi_reset_disable; 2168 2169 /* Initiator ID. */ 2170 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id; 2171 mb[0] = MBC_SET_INITIATOR_ID; 2172 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 : 2173 ha->bus_settings[bus].id; 2174 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 2175 2176 /* Reset Delay. */ 2177 ha->bus_settings[bus].bus_reset_delay = 2178 nv->bus[bus].bus_reset_delay; 2179 2180 /* Command queue depth per device. */ 2181 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1; 2182 2183 /* Set target parameters. */ 2184 for (target = 0; target < MAX_TARGETS; target++) 2185 status |= qla1280_config_target(ha, bus, target); 2186 2187 return status; 2188} 2189 2190static int 2191qla1280_nvram_config(struct scsi_qla_host *ha) 2192{ 2193 struct device_reg __iomem *reg = ha->iobase; 2194 struct nvram *nv = &ha->nvram; 2195 int bus, target, status = 0; 2196 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2197 2198 ENTER("qla1280_nvram_config"); 2199 2200 if (ha->nvram_valid) { 2201 /* Always force AUTO sense for LINUX SCSI */ 2202 for (bus = 0; bus < MAX_BUSES; bus++) 2203 for (target = 0; target < MAX_TARGETS; target++) { 2204 nv->bus[bus].target[target].parameter. 2205 auto_request_sense = 1; 2206 } 2207 } else { 2208 qla1280_set_defaults(ha); 2209 } 2210 2211 qla1280_print_settings(nv); 2212 2213 /* Disable RISC load of firmware. */ 2214 ha->flags.disable_risc_code_load = 2215 nv->cntr_flags_1.disable_loading_risc_code; 2216 2217 if (IS_ISP1040(ha)) { 2218 uint16_t hwrev, cfg1, cdma_conf, ddma_conf; 2219 2220 hwrev = RD_REG_WORD(®->cfg_0) & ISP_CFG0_HWMSK; 2221 2222 cfg1 = RD_REG_WORD(®->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6); 2223 cdma_conf = RD_REG_WORD(®->cdma_cfg); 2224 ddma_conf = RD_REG_WORD(®->ddma_cfg); 2225 2226 /* Busted fifo, says mjacob. 
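		 * The NVRAM FIFO threshold is only applied to revisions other
		 * than the 1040A; burst enable is set for all of them.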
		 */
		if (hwrev != ISP_CFG0_1040A)
			cfg1 |= nv->isp_config.fifo_threshold << 4;

		cfg1 |= nv->isp_config.burst_enable << 2;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
	} else {
		uint16_t cfg1, term;

		/* Set ISP hardware DMA burst */
		cfg1 = nv->isp_config.fifo_threshold << 4;
		cfg1 |= nv->isp_config.burst_enable << 2;
		/* Enable DMA arbitration on dual channel controllers */
		if (ha->ports > 1)
			cfg1 |= BIT_13;
		WRT_REG_WORD(&reg->cfg_1, cfg1);

		/* Set SCSI termination. */
		WRT_REG_WORD(&reg->gpio_enable,
			     BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
		term = nv->termination.scsi_bus_1_control;
		term |= nv->termination.scsi_bus_0_control << 2;
		term |= nv->termination.auto_term_support << 7;
		RD_REG_WORD(&reg->id_l);	/* Flush PCI write */
		WRT_REG_WORD(&reg->gpio_data, term);
	}
	RD_REG_WORD(&reg->id_l);	/* Flush PCI write */

	/* ISP parameter word. */
	mb[0] = MBC_SET_SYSTEM_PARAMETER;
	mb[1] = nv->isp_parameter;
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);

	if (IS_ISP1x40(ha)) {
		/* clock rate - for qla1240 and older, only */
		mb[0] = MBC_SET_CLOCK_RATE;
		mb[1] = 40;
		status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
	}

	/* Firmware feature word. */
	mb[0] = MBC_SET_FIRMWARE_FEATURES;
	mb[1] = nv->firmware_feature.f.enable_fast_posting;
	mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
	mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
	if (ia64_platform_is("sn2")) {
		printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
		       "workaround\n", ha->host_no);
		mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
	}
#endif
	status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);

	/* Retry count and delay. */
	mb[0] = MBC_SET_RETRY_COUNT;
	mb[1] = nv->bus[0].retry_count;
	mb[2] = nv->bus[0].retry_delay;
	mb[6] = nv->bus[1].retry_count;
	mb[7] = nv->bus[1].retry_delay;
	status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
					  BIT_1 | BIT_0, &mb[0]);

	/* ASYNC data setup time. */
	mb[0] = MBC_SET_ASYNC_DATA_SETUP;
	mb[1] = nv->bus[0].config_2.async_data_setup_time;
	mb[2] = nv->bus[1].config_2.async_data_setup_time;
	status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);

	/* Active negation states.
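	 * For each bus word (mb[1] = bus 0, mb[2] = bus 1), BIT_5 selects
	 * REQ/ACK active negation and BIT_4 selects data line active negation.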
*/ 2299 mb[0] = MBC_SET_ACTIVE_NEGATION; 2300 mb[1] = 0; 2301 if (nv->bus[0].config_2.req_ack_active_negation) 2302 mb[1] |= BIT_5; 2303 if (nv->bus[0].config_2.data_line_active_negation) 2304 mb[1] |= BIT_4; 2305 mb[2] = 0; 2306 if (nv->bus[1].config_2.req_ack_active_negation) 2307 mb[2] |= BIT_5; 2308 if (nv->bus[1].config_2.data_line_active_negation) 2309 mb[2] |= BIT_4; 2310 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2311 2312 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY; 2313 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */ 2314 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2315 2316 /* thingy */ 2317 mb[0] = MBC_SET_PCI_CONTROL; 2318 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */ 2319 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */ 2320 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2321 2322 mb[0] = MBC_SET_TAG_AGE_LIMIT; 2323 mb[1] = 8; 2324 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); 2325 2326 /* Selection timeout. */ 2327 mb[0] = MBC_SET_SELECTION_TIMEOUT; 2328 mb[1] = nv->bus[0].selection_timeout; 2329 mb[2] = nv->bus[1].selection_timeout; 2330 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb); 2331 2332 for (bus = 0; bus < ha->ports; bus++) 2333 status |= qla1280_config_bus(ha, bus); 2334 2335 if (status) 2336 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n"); 2337 2338 LEAVE("qla1280_nvram_config"); 2339 return status; 2340} 2341 2342/* 2343 * Get NVRAM data word 2344 * Calculates word position in NVRAM and calls request routine to 2345 * get the word from NVRAM. 2346 * 2347 * Input: 2348 * ha = adapter block pointer. 2349 * address = NVRAM word address. 2350 * 2351 * Returns: 2352 * data word. 2353 */ 2354static uint16_t 2355qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address) 2356{ 2357 uint32_t nv_cmd; 2358 uint16_t data; 2359 2360 nv_cmd = address << 16; 2361 nv_cmd |= NV_READ_OP; 2362 2363 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd)); 2364 2365 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = " 2366 "0x%x", data); 2367 2368 return data; 2369} 2370 2371/* 2372 * NVRAM request 2373 * Sends read command to NVRAM and gets data from NVRAM. 2374 * 2375 * Input: 2376 * ha = adapter block pointer. 2377 * nv_cmd = Bit 26 = start bit 2378 * Bit 25, 24 = opcode 2379 * Bit 23-16 = address 2380 * Bit 15-0 = write data 2381 * 2382 * Returns: 2383 * data word. 2384 */ 2385static uint16_t 2386qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd) 2387{ 2388 struct device_reg __iomem *reg = ha->iobase; 2389 int cnt; 2390 uint16_t data = 0; 2391 uint16_t reg_data; 2392 2393 /* Send command to NVRAM. */ 2394 2395 nv_cmd <<= 5; 2396 for (cnt = 0; cnt < 11; cnt++) { 2397 if (nv_cmd & BIT_31) 2398 qla1280_nv_write(ha, NV_DATA_OUT); 2399 else 2400 qla1280_nv_write(ha, 0); 2401 nv_cmd <<= 1; 2402 } 2403 2404 /* Read data from NVRAM. */ 2405 2406 for (cnt = 0; cnt < 16; cnt++) { 2407 WRT_REG_WORD(®->nvram, (NV_SELECT | NV_CLOCK)); 2408 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2409 NVRAM_DELAY(); 2410 data <<= 1; 2411 reg_data = RD_REG_WORD(®->nvram); 2412 if (reg_data & NV_DATA_IN) 2413 data |= BIT_0; 2414 WRT_REG_WORD(®->nvram, NV_SELECT); 2415 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2416 NVRAM_DELAY(); 2417 } 2418 2419 /* Deselect chip. 
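	 * Dropping NV_SELECT ends the serial NVRAM transaction; every step of
	 * this bit-banged protocol reads id_l to flush the posted PCI write
	 * and then waits NVRAM_DELAY() before the next clock edge.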
*/ 2420 2421 WRT_REG_WORD(®->nvram, NV_DESELECT); 2422 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2423 NVRAM_DELAY(); 2424 2425 return data; 2426} 2427 2428static void 2429qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data) 2430{ 2431 struct device_reg __iomem *reg = ha->iobase; 2432 2433 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2434 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2435 NVRAM_DELAY(); 2436 WRT_REG_WORD(®->nvram, data | NV_SELECT | NV_CLOCK); 2437 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2438 NVRAM_DELAY(); 2439 WRT_REG_WORD(®->nvram, data | NV_SELECT); 2440 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2441 NVRAM_DELAY(); 2442} 2443 2444/* 2445 * Mailbox Command 2446 * Issue mailbox command and waits for completion. 2447 * 2448 * Input: 2449 * ha = adapter block pointer. 2450 * mr = mailbox registers to load. 2451 * mb = data pointer for mailbox registers. 2452 * 2453 * Output: 2454 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data. 2455 * 2456 * Returns: 2457 * 0 = success 2458 */ 2459static int 2460qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) 2461{ 2462 struct device_reg __iomem *reg = ha->iobase; 2463 int status = 0; 2464 int cnt; 2465 uint16_t *optr, *iptr; 2466 uint16_t __iomem *mptr; 2467 uint16_t data; 2468 DECLARE_COMPLETION_ONSTACK(wait); 2469 struct timer_list timer; 2470 2471 ENTER("qla1280_mailbox_command"); 2472 2473 if (ha->mailbox_wait) { 2474 printk(KERN_ERR "Warning mailbox wait already in use!\n"); 2475 } 2476 ha->mailbox_wait = &wait; 2477 2478 /* 2479 * We really should start out by verifying that the mailbox is 2480 * available before starting sending the command data 2481 */ 2482 /* Load mailbox registers. */ 2483 mptr = (uint16_t __iomem *) ®->mailbox0; 2484 iptr = mb; 2485 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) { 2486 if (mr & BIT_0) { 2487 WRT_REG_WORD(mptr, (*iptr)); 2488 } 2489 2490 mr >>= 1; 2491 mptr++; 2492 iptr++; 2493 } 2494 2495 /* Issue set host interrupt command. */ 2496 2497 /* set up a timer just in case we're really jammed */ 2498 init_timer_on_stack(&timer); 2499 timer.expires = jiffies + 20*HZ; 2500 timer.data = (unsigned long)ha; 2501 timer.function = qla1280_mailbox_timeout; 2502 add_timer(&timer); 2503 2504 spin_unlock_irq(ha->host->host_lock); 2505 WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT); 2506 data = qla1280_debounce_register(®->istatus); 2507 2508 wait_for_completion(&wait); 2509 del_timer_sync(&timer); 2510 2511 spin_lock_irq(ha->host->host_lock); 2512 2513 ha->mailbox_wait = NULL; 2514 2515 /* Check for mailbox command timeout. */ 2516 if (ha->mailbox_out[0] != MBS_CMD_CMP) { 2517 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, " 2518 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = " 2519 "0x%04x\n", 2520 mb[0], ha->mailbox_out[0], RD_REG_WORD(®->istatus)); 2521 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n", 2522 RD_REG_WORD(®->mailbox0), RD_REG_WORD(®->mailbox1), 2523 RD_REG_WORD(®->mailbox2), RD_REG_WORD(®->mailbox3)); 2524 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n", 2525 RD_REG_WORD(®->mailbox4), RD_REG_WORD(®->mailbox5), 2526 RD_REG_WORD(®->mailbox6), RD_REG_WORD(®->mailbox7)); 2527 status = 1; 2528 } 2529 2530 /* Load return mailbox registers. 
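	 * The ISR has already copied the chip's mailboxes into
	 * ha->mailbox_out[]; all MAILBOX_REGISTER_COUNT words are copied back
	 * to the caller's mb[] array regardless of which registers were
	 * loaded on the way in.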
*/ 2531 optr = mb; 2532 iptr = (uint16_t *) &ha->mailbox_out[0]; 2533 mr = MAILBOX_REGISTER_COUNT; 2534 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); 2535 2536 if (ha->flags.reset_marker) 2537 qla1280_rst_aen(ha); 2538 2539 if (status) 2540 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " 2541 "0x%x ****\n", mb[0]); 2542 2543 LEAVE("qla1280_mailbox_command"); 2544 return status; 2545} 2546 2547/* 2548 * qla1280_poll 2549 * Polls ISP for interrupts. 2550 * 2551 * Input: 2552 * ha = adapter block pointer. 2553 */ 2554static void 2555qla1280_poll(struct scsi_qla_host *ha) 2556{ 2557 struct device_reg __iomem *reg = ha->iobase; 2558 uint16_t data; 2559 LIST_HEAD(done_q); 2560 2561 /* ENTER("qla1280_poll"); */ 2562 2563 /* Check for pending interrupts. */ 2564 data = RD_REG_WORD(®->istatus); 2565 if (data & RISC_INT) 2566 qla1280_isr(ha, &done_q); 2567 2568 if (!ha->mailbox_wait) { 2569 if (ha->flags.reset_marker) 2570 qla1280_rst_aen(ha); 2571 } 2572 2573 if (!list_empty(&done_q)) 2574 qla1280_done(ha); 2575 2576 /* LEAVE("qla1280_poll"); */ 2577} 2578 2579/* 2580 * qla1280_bus_reset 2581 * Issue SCSI bus reset. 2582 * 2583 * Input: 2584 * ha = adapter block pointer. 2585 * bus = SCSI bus number. 2586 * 2587 * Returns: 2588 * 0 = success 2589 */ 2590static int 2591qla1280_bus_reset(struct scsi_qla_host *ha, int bus) 2592{ 2593 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2594 uint16_t reset_delay; 2595 int status; 2596 2597 dprintk(3, "qla1280_bus_reset: entered\n"); 2598 2599 if (qla1280_verbose) 2600 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n", 2601 ha->host_no, bus); 2602 2603 reset_delay = ha->bus_settings[bus].bus_reset_delay; 2604 mb[0] = MBC_BUS_RESET; 2605 mb[1] = reset_delay; 2606 mb[2] = (uint16_t) bus; 2607 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2608 2609 if (status) { 2610 if (ha->bus_settings[bus].failed_reset_count > 2) 2611 ha->bus_settings[bus].scsi_bus_dead = 1; 2612 ha->bus_settings[bus].failed_reset_count++; 2613 } else { 2614 spin_unlock_irq(ha->host->host_lock); 2615 ssleep(reset_delay); 2616 spin_lock_irq(ha->host->host_lock); 2617 2618 ha->bus_settings[bus].scsi_bus_dead = 0; 2619 ha->bus_settings[bus].failed_reset_count = 0; 2620 ha->bus_settings[bus].reset_marker = 0; 2621 /* Issue marker command. */ 2622 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL); 2623 } 2624 2625 /* 2626 * We should probably call qla1280_set_target_parameters() 2627 * here as well for all devices on the bus. 2628 */ 2629 2630 if (status) 2631 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n"); 2632 else 2633 dprintk(3, "qla1280_bus_reset: exiting normally\n"); 2634 2635 return status; 2636} 2637 2638/* 2639 * qla1280_device_reset 2640 * Issue bus device reset message to the target. 2641 * 2642 * Input: 2643 * ha = adapter block pointer. 2644 * bus = SCSI BUS number. 2645 * target = SCSI ID. 2646 * 2647 * Returns: 2648 * 0 = success 2649 */ 2650static int 2651qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) 2652{ 2653 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2654 int status; 2655 2656 ENTER("qla1280_device_reset"); 2657 2658 mb[0] = MBC_ABORT_TARGET; 2659 mb[1] = (bus ? (target | BIT_7) : target) << 8; 2660 mb[2] = 1; 2661 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]); 2662 2663 /* Issue marker command. 
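	 * MK_SYNC_ID resynchronizes only the target that was just reset; the
	 * bus reset path above uses MK_SYNC_ALL instead.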
*/ 2664 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); 2665 2666 if (status) 2667 dprintk(2, "qla1280_device_reset: **** FAILED ****\n"); 2668 2669 LEAVE("qla1280_device_reset"); 2670 return status; 2671} 2672 2673/* 2674 * qla1280_abort_command 2675 * Abort command aborts a specified IOCB. 2676 * 2677 * Input: 2678 * ha = adapter block pointer. 2679 * sp = SB structure pointer. 2680 * 2681 * Returns: 2682 * 0 = success 2683 */ 2684static int 2685qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle) 2686{ 2687 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2688 unsigned int bus, target, lun; 2689 int status; 2690 2691 ENTER("qla1280_abort_command"); 2692 2693 bus = SCSI_BUS_32(sp->cmd); 2694 target = SCSI_TCN_32(sp->cmd); 2695 lun = SCSI_LUN_32(sp->cmd); 2696 2697 sp->flags |= SRB_ABORT_PENDING; 2698 2699 mb[0] = MBC_ABORT_COMMAND; 2700 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; 2701 mb[2] = handle >> 16; 2702 mb[3] = handle & 0xffff; 2703 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]); 2704 2705 if (status) { 2706 dprintk(2, "qla1280_abort_command: **** FAILED ****\n"); 2707 sp->flags &= ~SRB_ABORT_PENDING; 2708 } 2709 2710 2711 LEAVE("qla1280_abort_command"); 2712 return status; 2713} 2714 2715/* 2716 * qla1280_reset_adapter 2717 * Reset adapter. 2718 * 2719 * Input: 2720 * ha = adapter block pointer. 2721 */ 2722static void 2723qla1280_reset_adapter(struct scsi_qla_host *ha) 2724{ 2725 struct device_reg __iomem *reg = ha->iobase; 2726 2727 ENTER("qla1280_reset_adapter"); 2728 2729 /* Disable ISP chip */ 2730 ha->flags.online = 0; 2731 WRT_REG_WORD(®->ictrl, ISP_RESET); 2732 WRT_REG_WORD(®->host_cmd, 2733 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS); 2734 RD_REG_WORD(®->id_l); /* Flush PCI write */ 2735 2736 LEAVE("qla1280_reset_adapter"); 2737} 2738 2739/* 2740 * Issue marker command. 2741 * Function issues marker IOCB. 2742 * 2743 * Input: 2744 * ha = adapter block pointer. 2745 * bus = SCSI BUS number 2746 * id = SCSI ID 2747 * lun = SCSI LUN 2748 * type = marker modifier 2749 */ 2750static void 2751qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type) 2752{ 2753 struct mrk_entry *pkt; 2754 2755 ENTER("qla1280_marker"); 2756 2757 /* Get request packet. */ 2758 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) { 2759 pkt->entry_type = MARKER_TYPE; 2760 pkt->lun = (uint8_t) lun; 2761 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id); 2762 pkt->modifier = type; 2763 pkt->entry_status = 0; 2764 2765 /* Issue command to ISP */ 2766 qla1280_isp_cmd(ha); 2767 } 2768 2769 LEAVE("qla1280_marker"); 2770} 2771 2772 2773/* 2774 * qla1280_64bit_start_scsi 2775 * The start SCSI is responsible for building request packets on 2776 * request ring and modifying ISP input pointer. 2777 * 2778 * Input: 2779 * ha = adapter block pointer. 2780 * sp = SB structure pointer. 2781 * 2782 * Returns: 2783 * 0 = success, was able to issue command. 2784 */ 2785#ifdef QLA_64BIT_PTR 2786static int 2787qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) 2788{ 2789 struct device_reg __iomem *reg = ha->iobase; 2790 struct scsi_cmnd *cmd = sp->cmd; 2791 cmd_a64_entry_t *pkt; 2792 __le32 *dword_ptr; 2793 dma_addr_t dma_handle; 2794 int status = 0; 2795 int cnt; 2796 int req_cnt; 2797 int seg_cnt; 2798 u8 dir; 2799 2800 ENTER("qla1280_64bit_start_scsi:"); 2801 2802 /* Calculate number of entries and segments required. 
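	 * A COMMAND_A64 IOCB holds at most two data segments and each
	 * CONTINUE_A64 IOCB holds five more, which is where the
	 * (seg_cnt - 2) / 5 arithmetic below comes from.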
*/ 2803 req_cnt = 1; 2804 seg_cnt = scsi_dma_map(cmd); 2805 if (seg_cnt > 0) { 2806 if (seg_cnt > 2) { 2807 req_cnt += (seg_cnt - 2) / 5; 2808 if ((seg_cnt - 2) % 5) 2809 req_cnt++; 2810 } 2811 } else if (seg_cnt < 0) { 2812 status = 1; 2813 goto out; 2814 } 2815 2816 if ((req_cnt + 2) >= ha->req_q_cnt) { 2817 /* Calculate number of free request entries. */ 2818 cnt = RD_REG_WORD(®->mailbox4); 2819 if (ha->req_ring_index < cnt) 2820 ha->req_q_cnt = cnt - ha->req_ring_index; 2821 else 2822 ha->req_q_cnt = 2823 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 2824 } 2825 2826 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n", 2827 ha->req_q_cnt, seg_cnt); 2828 2829 /* If room for request in request ring. */ 2830 if ((req_cnt + 2) >= ha->req_q_cnt) { 2831 status = SCSI_MLQUEUE_HOST_BUSY; 2832 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" 2833 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, 2834 req_cnt); 2835 goto out; 2836 } 2837 2838 /* Check for room in outstanding command list. */ 2839 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS && 2840 ha->outstanding_cmds[cnt] != NULL; cnt++); 2841 2842 if (cnt >= MAX_OUTSTANDING_COMMANDS) { 2843 status = SCSI_MLQUEUE_HOST_BUSY; 2844 dprintk(2, "qla1280_start_scsi: NO ROOM IN " 2845 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); 2846 goto out; 2847 } 2848 2849 ha->outstanding_cmds[cnt] = sp; 2850 ha->req_q_cnt -= req_cnt; 2851 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1); 2852 2853 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp, 2854 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd)); 2855 dprintk(2, " bus %i, target %i, lun %i\n", 2856 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 2857 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE); 2858 2859 /* 2860 * Build command packet. 2861 */ 2862 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr; 2863 2864 pkt->entry_type = COMMAND_A64_TYPE; 2865 pkt->entry_count = (uint8_t) req_cnt; 2866 pkt->sys_define = (uint8_t) ha->req_ring_index; 2867 pkt->entry_status = 0; 2868 pkt->handle = cpu_to_le32(cnt); 2869 2870 /* Zero out remaining portion of packet. */ 2871 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 2872 2873 /* Set ISP command timeout. */ 2874 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); 2875 2876 /* Set device target ID and LUN */ 2877 pkt->lun = SCSI_LUN_32(cmd); 2878 pkt->target = SCSI_BUS_32(cmd) ? 2879 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 2880 2881 /* Enable simple tag queuing if device supports it. */ 2882 if (cmd->device->simple_tags) 2883 pkt->control_flags |= cpu_to_le16(BIT_3); 2884 2885 /* Load SCSI command packet. */ 2886 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 2887 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd)); 2888 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 2889 2890 /* Set transfer direction. */ 2891 dir = qla1280_data_direction(cmd); 2892 pkt->control_flags |= cpu_to_le16(dir); 2893 2894 /* Set total data segment count. */ 2895 pkt->dseg_count = cpu_to_le16(seg_cnt); 2896 2897 /* 2898 * Load data segments. 2899 */ 2900 if (seg_cnt) { /* If data transfer. */ 2901 struct scatterlist *sg, *s; 2902 int remseg = seg_cnt; 2903 2904 sg = scsi_sglist(cmd); 2905 2906 /* Setup packet address segment pointer. */ 2907 dword_ptr = (u32 *)&pkt->dseg_0_address; 2908 2909 /* Load command entry data segments. 
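		 * Each segment is written as three little-endian 32-bit words:
		 * low address, high address, length. Only the first two
		 * segments fit in the command IOCB itself; the rest spill into
		 * the continuation entries built further down.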
*/ 2910 for_each_sg(sg, s, seg_cnt, cnt) { 2911 if (cnt == 2) 2912 break; 2913 2914 dma_handle = sg_dma_address(s); 2915#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2916 if (ha->flags.use_pci_vchannel) 2917 sn_pci_set_vchan(ha->pdev, 2918 (unsigned long *)&dma_handle, 2919 SCSI_BUS_32(cmd)); 2920#endif 2921 *dword_ptr++ = 2922 cpu_to_le32(pci_dma_lo32(dma_handle)); 2923 *dword_ptr++ = 2924 cpu_to_le32(pci_dma_hi32(dma_handle)); 2925 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 2926 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", 2927 cpu_to_le32(pci_dma_hi32(dma_handle)), 2928 cpu_to_le32(pci_dma_lo32(dma_handle)), 2929 cpu_to_le32(sg_dma_len(sg_next(s)))); 2930 remseg--; 2931 } 2932 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " 2933 "command packet data - b %i, t %i, l %i \n", 2934 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), 2935 SCSI_LUN_32(cmd)); 2936 qla1280_dump_buffer(5, (char *)pkt, 2937 REQUEST_ENTRY_SIZE); 2938 2939 /* 2940 * Build continuation packets. 2941 */ 2942 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x " 2943 "remains\n", seg_cnt); 2944 2945 while (remseg > 0) { 2946 /* Update sg start */ 2947 sg = s; 2948 /* Adjust ring index. */ 2949 ha->req_ring_index++; 2950 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 2951 ha->req_ring_index = 0; 2952 ha->request_ring_ptr = 2953 ha->request_ring; 2954 } else 2955 ha->request_ring_ptr++; 2956 2957 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr; 2958 2959 /* Zero out packet. */ 2960 memset(pkt, 0, REQUEST_ENTRY_SIZE); 2961 2962 /* Load packet defaults. */ 2963 ((struct cont_a64_entry *) pkt)->entry_type = 2964 CONTINUE_A64_TYPE; 2965 ((struct cont_a64_entry *) pkt)->entry_count = 1; 2966 ((struct cont_a64_entry *) pkt)->sys_define = 2967 (uint8_t)ha->req_ring_index; 2968 /* Setup packet address segment pointer. */ 2969 dword_ptr = 2970 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address; 2971 2972 /* Load continuation entry data segments. */ 2973 for_each_sg(sg, s, remseg, cnt) { 2974 if (cnt == 5) 2975 break; 2976 dma_handle = sg_dma_address(s); 2977#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) 2978 if (ha->flags.use_pci_vchannel) 2979 sn_pci_set_vchan(ha->pdev, 2980 (unsigned long *)&dma_handle, 2981 SCSI_BUS_32(cmd)); 2982#endif 2983 *dword_ptr++ = 2984 cpu_to_le32(pci_dma_lo32(dma_handle)); 2985 *dword_ptr++ = 2986 cpu_to_le32(pci_dma_hi32(dma_handle)); 2987 *dword_ptr++ = 2988 cpu_to_le32(sg_dma_len(s)); 2989 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n", 2990 cpu_to_le32(pci_dma_hi32(dma_handle)), 2991 cpu_to_le32(pci_dma_lo32(dma_handle)), 2992 cpu_to_le32(sg_dma_len(s))); 2993 } 2994 remseg -= cnt; 2995 dprintk(5, "qla1280_64bit_start_scsi: " 2996 "continuation packet data - b %i, t " 2997 "%i, l %i \n", SCSI_BUS_32(cmd), 2998 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 2999 qla1280_dump_buffer(5, (char *)pkt, 3000 REQUEST_ENTRY_SIZE); 3001 } 3002 } else { /* No data transfer */ 3003 dprintk(5, "qla1280_64bit_start_scsi: No data, command " 3004 "packet data - b %i, t %i, l %i \n", 3005 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3006 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3007 } 3008 /* Adjust ring index. */ 3009 ha->req_ring_index++; 3010 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3011 ha->req_ring_index = 0; 3012 ha->request_ring_ptr = ha->request_ring; 3013 } else 3014 ha->request_ring_ptr++; 3015 3016 /* Set chip new ring index. 
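	 * Writing the updated in-pointer to mailbox4 is what actually hands
	 * the new IOCB(s) to the RISC.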
	 */
	dprintk(2,
		"qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
	sp->flags |= SRB_SENT;
	ha->actthreads++;
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
	/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
	mmiowb();

 out:
	if (status)
		dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
	else
		dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");

	return status;
}
#else /* !QLA_64BIT_PTR */

/*
 * qla1280_32bit_start_scsi
 *      The start SCSI routine is responsible for building request packets
 *      on the request ring and for modifying the ISP input pointer.
 *
 *      The Qlogic firmware interface allows every queue slot to have a SCSI
 *      command and up to 4 scatter/gather (SG) entries.  If we need more
 *      than 4 SG entries, then continuation entries are used that can
 *      hold another 7 entries each.  The start routine determines whether
 *      there are enough empty slots and then builds the combination of
 *      requests needed to fulfill the OS request.
 *
 * Input:
 *      ha = adapter block pointer.
 *      sp = SCSI Request Block structure pointer.
 *
 * Returns:
 *      0 = success, was able to issue command.
 */
static int
qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct scsi_cmnd *cmd = sp->cmd;
	struct cmd_entry *pkt;
	__le32 *dword_ptr;
	int status = 0;
	int cnt;
	int req_cnt;
	int seg_cnt;
	u8 dir;

	ENTER("qla1280_32bit_start_scsi");

	dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
		cmd->cmnd[0]);

	/* Calculate number of entries and segments required. */
	req_cnt = 1;
	seg_cnt = scsi_dma_map(cmd);
	if (seg_cnt > 0) {
		/*
		 * if greater than four sg entries then we need to allocate
		 * continuation entries
		 */
		if (seg_cnt > 4) {
			req_cnt += (seg_cnt - 4) / 7;
			if ((seg_cnt - 4) % 7)
				req_cnt++;
		}
		dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
			cmd, seg_cnt, req_cnt);
	} else if (seg_cnt < 0) {
		status = 1;
		goto out;
	}

	if ((req_cnt + 2) >= ha->req_q_cnt) {
		/* Calculate number of free request entries. */
		cnt = RD_REG_WORD(&reg->mailbox4);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt =
				REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
	}

	dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
		ha->req_q_cnt, seg_cnt);
	/* If room for request in request ring. */
	if ((req_cnt + 2) >= ha->req_q_cnt) {
		status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
			"req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
			ha->req_q_cnt, req_cnt);
		goto out;
	}

	/* Check for empty slot in outstanding command list. */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
		     (ha->outstanding_cmds[cnt] != 0); cnt++) ;

	if (cnt >= MAX_OUTSTANDING_COMMANDS) {
		status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
			"ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
		goto out;
	}

	CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
	ha->outstanding_cmds[cnt] = sp;
	ha->req_q_cnt -= req_cnt;

	/*
	 * Build command packet.
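	 * Same layout as the 64-bit path above, except that data segments are
	 * two words each (address, length) and four of them fit in the
	 * command entry, with seven per continuation entry.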
3130 */ 3131 pkt = (struct cmd_entry *) ha->request_ring_ptr; 3132 3133 pkt->entry_type = COMMAND_TYPE; 3134 pkt->entry_count = (uint8_t) req_cnt; 3135 pkt->sys_define = (uint8_t) ha->req_ring_index; 3136 pkt->entry_status = 0; 3137 pkt->handle = cpu_to_le32(cnt); 3138 3139 /* Zero out remaining portion of packet. */ 3140 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); 3141 3142 /* Set ISP command timeout. */ 3143 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); 3144 3145 /* Set device target ID and LUN */ 3146 pkt->lun = SCSI_LUN_32(cmd); 3147 pkt->target = SCSI_BUS_32(cmd) ? 3148 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd); 3149 3150 /* Enable simple tag queuing if device supports it. */ 3151 if (cmd->device->simple_tags) 3152 pkt->control_flags |= cpu_to_le16(BIT_3); 3153 3154 /* Load SCSI command packet. */ 3155 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd)); 3156 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd)); 3157 3158 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */ 3159 /* Set transfer direction. */ 3160 dir = qla1280_data_direction(cmd); 3161 pkt->control_flags |= cpu_to_le16(dir); 3162 3163 /* Set total data segment count. */ 3164 pkt->dseg_count = cpu_to_le16(seg_cnt); 3165 3166 /* 3167 * Load data segments. 3168 */ 3169 if (seg_cnt) { 3170 struct scatterlist *sg, *s; 3171 int remseg = seg_cnt; 3172 3173 sg = scsi_sglist(cmd); 3174 3175 /* Setup packet address segment pointer. */ 3176 dword_ptr = &pkt->dseg_0_address; 3177 3178 dprintk(3, "Building S/G data segments..\n"); 3179 qla1280_dump_buffer(1, (char *)sg, 4 * 16); 3180 3181 /* Load command entry data segments. */ 3182 for_each_sg(sg, s, seg_cnt, cnt) { 3183 if (cnt == 4) 3184 break; 3185 *dword_ptr++ = 3186 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3187 *dword_ptr++ = cpu_to_le32(sg_dma_len(s)); 3188 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n", 3189 (pci_dma_lo32(sg_dma_address(s))), 3190 (sg_dma_len(s))); 3191 remseg--; 3192 } 3193 /* 3194 * Build continuation packets. 3195 */ 3196 dprintk(3, "S/G Building Continuation" 3197 "...seg_cnt=0x%x remains\n", seg_cnt); 3198 while (remseg > 0) { 3199 /* Continue from end point */ 3200 sg = s; 3201 /* Adjust ring index. */ 3202 ha->req_ring_index++; 3203 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3204 ha->req_ring_index = 0; 3205 ha->request_ring_ptr = 3206 ha->request_ring; 3207 } else 3208 ha->request_ring_ptr++; 3209 3210 pkt = (struct cmd_entry *)ha->request_ring_ptr; 3211 3212 /* Zero out packet. */ 3213 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3214 3215 /* Load packet defaults. */ 3216 ((struct cont_entry *) pkt)-> 3217 entry_type = CONTINUE_TYPE; 3218 ((struct cont_entry *) pkt)->entry_count = 1; 3219 3220 ((struct cont_entry *) pkt)->sys_define = 3221 (uint8_t) ha->req_ring_index; 3222 3223 /* Setup packet address segment pointer. */ 3224 dword_ptr = 3225 &((struct cont_entry *) pkt)->dseg_0_address; 3226 3227 /* Load continuation entry data segments. */ 3228 for_each_sg(sg, s, remseg, cnt) { 3229 if (cnt == 7) 3230 break; 3231 *dword_ptr++ = 3232 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))); 3233 *dword_ptr++ = 3234 cpu_to_le32(sg_dma_len(s)); 3235 dprintk(1, 3236 "S/G Segment Cont. 
phys_addr=0x%x, " 3237 "len=0x%x\n", 3238 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))), 3239 cpu_to_le32(sg_dma_len(s))); 3240 } 3241 remseg -= cnt; 3242 dprintk(5, "qla1280_32bit_start_scsi: " 3243 "continuation packet data - " 3244 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd), 3245 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd)); 3246 qla1280_dump_buffer(5, (char *)pkt, 3247 REQUEST_ENTRY_SIZE); 3248 } 3249 } else { /* No data transfer at all */ 3250 dprintk(5, "qla1280_32bit_start_scsi: No data, command " 3251 "packet data - \n"); 3252 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE); 3253 } 3254 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n"); 3255 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3256 REQUEST_ENTRY_SIZE); 3257 3258 /* Adjust ring index. */ 3259 ha->req_ring_index++; 3260 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3261 ha->req_ring_index = 0; 3262 ha->request_ring_ptr = ha->request_ring; 3263 } else 3264 ha->request_ring_ptr++; 3265 3266 /* Set chip new ring index. */ 3267 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC " 3268 "for pending command\n"); 3269 sp->flags |= SRB_SENT; 3270 ha->actthreads++; 3271 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3272 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */ 3273 mmiowb(); 3274 3275out: 3276 if (status) 3277 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n"); 3278 3279 LEAVE("qla1280_32bit_start_scsi"); 3280 3281 return status; 3282} 3283#endif 3284 3285/* 3286 * qla1280_req_pkt 3287 * Function is responsible for locking ring and 3288 * getting a zeroed out request packet. 3289 * 3290 * Input: 3291 * ha = adapter block pointer. 3292 * 3293 * Returns: 3294 * 0 = failed to get slot. 3295 */ 3296static request_t * 3297qla1280_req_pkt(struct scsi_qla_host *ha) 3298{ 3299 struct device_reg __iomem *reg = ha->iobase; 3300 request_t *pkt = NULL; 3301 int cnt; 3302 uint32_t timer; 3303 3304 ENTER("qla1280_req_pkt"); 3305 3306 /* 3307 * This can be called from interrupt context, damn it!!! 3308 */ 3309 /* Wait for 30 seconds for slot. */ 3310 for (timer = 15000000; timer; timer--) { 3311 if (ha->req_q_cnt > 0) { 3312 /* Calculate number of free request entries. */ 3313 cnt = RD_REG_WORD(®->mailbox4); 3314 if (ha->req_ring_index < cnt) 3315 ha->req_q_cnt = cnt - ha->req_ring_index; 3316 else 3317 ha->req_q_cnt = 3318 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt); 3319 } 3320 3321 /* Found empty request ring slot? */ 3322 if (ha->req_q_cnt > 0) { 3323 ha->req_q_cnt--; 3324 pkt = ha->request_ring_ptr; 3325 3326 /* Zero out packet. */ 3327 memset(pkt, 0, REQUEST_ENTRY_SIZE); 3328 3329 /* 3330 * How can this be right when we have a ring 3331 * size of 512??? 3332 */ 3333 /* Set system defined field. */ 3334 pkt->sys_define = (uint8_t) ha->req_ring_index; 3335 3336 /* Set entry count. */ 3337 pkt->entry_count = 1; 3338 3339 break; 3340 } 3341 3342 udelay(2); /* 10 */ 3343 3344 /* Check for pending interrupts. */ 3345 qla1280_poll(ha); 3346 } 3347 3348 if (!pkt) 3349 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n"); 3350 else 3351 dprintk(3, "qla1280_req_pkt: exiting normally\n"); 3352 3353 return pkt; 3354} 3355 3356/* 3357 * qla1280_isp_cmd 3358 * Function is responsible for modifying ISP input pointer. 3359 * Releases ring lock. 3360 * 3361 * Input: 3362 * ha = adapter block pointer. 
3363 */ 3364static void 3365qla1280_isp_cmd(struct scsi_qla_host *ha) 3366{ 3367 struct device_reg __iomem *reg = ha->iobase; 3368 3369 ENTER("qla1280_isp_cmd"); 3370 3371 dprintk(5, "qla1280_isp_cmd: IOCB data:\n"); 3372 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr, 3373 REQUEST_ENTRY_SIZE); 3374 3375 /* Adjust ring index. */ 3376 ha->req_ring_index++; 3377 if (ha->req_ring_index == REQUEST_ENTRY_CNT) { 3378 ha->req_ring_index = 0; 3379 ha->request_ring_ptr = ha->request_ring; 3380 } else 3381 ha->request_ring_ptr++; 3382 3383 /* 3384 * Update request index to mailbox4 (Request Queue In). 3385 * The mmiowb() ensures that this write is ordered with writes by other 3386 * CPUs. Without the mmiowb(), it is possible for the following: 3387 * CPUA posts write of index 5 to mailbox4 3388 * CPUA releases host lock 3389 * CPUB acquires host lock 3390 * CPUB posts write of index 6 to mailbox4 3391 * On PCI bus, order reverses and write of 6 posts, then index 5, 3392 * causing chip to issue full queue of stale commands 3393 * The mmiowb() prevents future writes from crossing the barrier. 3394 * See Documentation/DocBook/deviceiobook.tmpl for more information. 3395 */ 3396 WRT_REG_WORD(®->mailbox4, ha->req_ring_index); 3397 mmiowb(); 3398 3399 LEAVE("qla1280_isp_cmd"); 3400} 3401 3402/****************************************************************************/ 3403/* Interrupt Service Routine. */ 3404/****************************************************************************/ 3405 3406/**************************************************************************** 3407 * qla1280_isr 3408 * Calls I/O done on command completion. 3409 * 3410 * Input: 3411 * ha = adapter block pointer. 3412 * done_q = done queue. 3413 ****************************************************************************/ 3414static void 3415qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) 3416{ 3417 struct device_reg __iomem *reg = ha->iobase; 3418 struct response *pkt; 3419 struct srb *sp = NULL; 3420 uint16_t mailbox[MAILBOX_REGISTER_COUNT]; 3421 uint16_t *wptr; 3422 uint32_t index; 3423 u16 istatus; 3424 3425 ENTER("qla1280_isr"); 3426 3427 istatus = RD_REG_WORD(®->istatus); 3428 if (!(istatus & (RISC_INT | PCI_INT))) 3429 return; 3430 3431 /* Save mailbox register 5 */ 3432 mailbox[5] = RD_REG_WORD(®->mailbox5); 3433 3434 /* Check for mailbox interrupt. */ 3435 3436 mailbox[0] = RD_REG_WORD_dmasync(®->semaphore); 3437 3438 if (mailbox[0] & BIT_0) { 3439 /* Get mailbox data. */ 3440 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */ 3441 3442 wptr = &mailbox[0]; 3443 *wptr++ = RD_REG_WORD(®->mailbox0); 3444 *wptr++ = RD_REG_WORD(®->mailbox1); 3445 *wptr = RD_REG_WORD(®->mailbox2); 3446 if (mailbox[0] != MBA_SCSI_COMPLETION) { 3447 wptr++; 3448 *wptr++ = RD_REG_WORD(®->mailbox3); 3449 *wptr++ = RD_REG_WORD(®->mailbox4); 3450 wptr++; 3451 *wptr++ = RD_REG_WORD(®->mailbox6); 3452 *wptr = RD_REG_WORD(®->mailbox7); 3453 } 3454 3455 /* Release mailbox registers. */ 3456 3457 WRT_REG_WORD(®->semaphore, 0); 3458 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3459 3460 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x", 3461 mailbox[0]); 3462 3463 /* Handle asynchronous event */ 3464 switch (mailbox[0]) { 3465 case MBA_SCSI_COMPLETION: /* Response completion */ 3466 dprintk(5, "qla1280_isr: mailbox SCSI response " 3467 "completion\n"); 3468 3469 if (ha->flags.online) { 3470 /* Get outstanding command index. */ 3471 index = mailbox[2] << 16 | mailbox[1]; 3472 3473 /* Validate handle. 
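				 * The completion posts, in mailbox1/mailbox2,
				 * the outstanding_cmds[] index that was stored
				 * in the IOCB handle field at submit time;
				 * anything out of range means the ISP reported
				 * a command this driver never issued.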
*/ 3474 if (index < MAX_OUTSTANDING_COMMANDS) 3475 sp = ha->outstanding_cmds[index]; 3476 else 3477 sp = NULL; 3478 3479 if (sp) { 3480 /* Free outstanding command slot. */ 3481 ha->outstanding_cmds[index] = NULL; 3482 3483 /* Save ISP completion status */ 3484 CMD_RESULT(sp->cmd) = 0; 3485 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; 3486 3487 /* Place block on done queue */ 3488 list_add_tail(&sp->list, done_q); 3489 } else { 3490 /* 3491 * If we get here we have a real problem! 3492 */ 3493 printk(KERN_WARNING 3494 "qla1280: ISP invalid handle\n"); 3495 } 3496 } 3497 break; 3498 3499 case MBA_BUS_RESET: /* SCSI Bus Reset */ 3500 ha->flags.reset_marker = 1; 3501 index = mailbox[6] & BIT_0; 3502 ha->bus_settings[index].reset_marker = 1; 3503 3504 printk(KERN_DEBUG "qla1280_isr(): index %i " 3505 "asynchronous BUS_RESET\n", index); 3506 break; 3507 3508 case MBA_SYSTEM_ERR: /* System Error */ 3509 printk(KERN_WARNING 3510 "qla1280: ISP System Error - mbx1=%xh, mbx2=" 3511 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2], 3512 mailbox[3]); 3513 break; 3514 3515 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 3516 printk(KERN_WARNING 3517 "qla1280: ISP Request Transfer Error\n"); 3518 break; 3519 3520 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 3521 printk(KERN_WARNING 3522 "qla1280: ISP Response Transfer Error\n"); 3523 break; 3524 3525 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 3526 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n"); 3527 break; 3528 3529 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */ 3530 dprintk(2, 3531 "qla1280_isr: asynchronous TIMEOUT_RESET\n"); 3532 break; 3533 3534 case MBA_DEVICE_RESET: /* Bus Device Reset */ 3535 printk(KERN_INFO "qla1280_isr(): asynchronous " 3536 "BUS_DEVICE_RESET\n"); 3537 3538 ha->flags.reset_marker = 1; 3539 index = mailbox[6] & BIT_0; 3540 ha->bus_settings[index].reset_marker = 1; 3541 break; 3542 3543 case MBA_BUS_MODE_CHANGE: 3544 dprintk(2, 3545 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n"); 3546 break; 3547 3548 default: 3549 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */ 3550 if (mailbox[0] < MBA_ASYNC_EVENT) { 3551 wptr = &mailbox[0]; 3552 memcpy((uint16_t *) ha->mailbox_out, wptr, 3553 MAILBOX_REGISTER_COUNT * 3554 sizeof(uint16_t)); 3555 3556 if(ha->mailbox_wait != NULL) 3557 complete(ha->mailbox_wait); 3558 } 3559 break; 3560 } 3561 } else { 3562 WRT_REG_WORD(®->host_cmd, HC_CLR_RISC_INT); 3563 } 3564 3565 /* 3566 * We will receive interrupts during mailbox testing prior to 3567 * the card being marked online, hence the double check. 
3568 */ 3569 if (!(ha->flags.online && !ha->mailbox_wait)) { 3570 dprintk(2, "qla1280_isr: Response pointer Error\n"); 3571 goto out; 3572 } 3573 3574 if (mailbox[5] >= RESPONSE_ENTRY_CNT) 3575 goto out; 3576 3577 while (ha->rsp_ring_index != mailbox[5]) { 3578 pkt = ha->response_ring_ptr; 3579 3580 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]" 3581 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]); 3582 dprintk(5,"qla1280_isr: response packet data\n"); 3583 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE); 3584 3585 if (pkt->entry_type == STATUS_TYPE) { 3586 if ((le16_to_cpu(pkt->scsi_status) & 0xff) 3587 || pkt->comp_status || pkt->entry_status) { 3588 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3589 "0x%x mailbox[5] = 0x%x, comp_status " 3590 "= 0x%x, scsi_status = 0x%x\n", 3591 ha->rsp_ring_index, mailbox[5], 3592 le16_to_cpu(pkt->comp_status), 3593 le16_to_cpu(pkt->scsi_status)); 3594 } 3595 } else { 3596 dprintk(2, "qla1280_isr: ha->rsp_ring_index = " 3597 "0x%x, mailbox[5] = 0x%x\n", 3598 ha->rsp_ring_index, mailbox[5]); 3599 dprintk(2, "qla1280_isr: response packet data\n"); 3600 qla1280_dump_buffer(2, (char *)pkt, 3601 RESPONSE_ENTRY_SIZE); 3602 } 3603 3604 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) { 3605 dprintk(2, "status: Cmd %p, handle %i\n", 3606 ha->outstanding_cmds[pkt->handle]->cmd, 3607 pkt->handle); 3608 if (pkt->entry_type == STATUS_TYPE) 3609 qla1280_status_entry(ha, pkt, done_q); 3610 else 3611 qla1280_error_entry(ha, pkt, done_q); 3612 /* Adjust ring index. */ 3613 ha->rsp_ring_index++; 3614 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) { 3615 ha->rsp_ring_index = 0; 3616 ha->response_ring_ptr = ha->response_ring; 3617 } else 3618 ha->response_ring_ptr++; 3619 WRT_REG_WORD(®->mailbox5, ha->rsp_ring_index); 3620 } 3621 } 3622 3623 out: 3624 LEAVE("qla1280_isr"); 3625} 3626 3627/* 3628 * qla1280_rst_aen 3629 * Processes asynchronous reset. 3630 * 3631 * Input: 3632 * ha = adapter block pointer. 3633 */ 3634static void 3635qla1280_rst_aen(struct scsi_qla_host *ha) 3636{ 3637 uint8_t bus; 3638 3639 ENTER("qla1280_rst_aen"); 3640 3641 if (ha->flags.online && !ha->flags.reset_active && 3642 !ha->flags.abort_isp_active) { 3643 ha->flags.reset_active = 1; 3644 while (ha->flags.reset_marker) { 3645 /* Issue marker command. */ 3646 ha->flags.reset_marker = 0; 3647 for (bus = 0; bus < ha->ports && 3648 !ha->flags.reset_marker; bus++) { 3649 if (ha->bus_settings[bus].reset_marker) { 3650 ha->bus_settings[bus].reset_marker = 0; 3651 qla1280_marker(ha, bus, 0, 0, 3652 MK_SYNC_ALL); 3653 } 3654 } 3655 } 3656 } 3657 3658 LEAVE("qla1280_rst_aen"); 3659} 3660 3661 3662/* 3663 * qla1280_status_entry 3664 * Processes received ISP status entry. 3665 * 3666 * Input: 3667 * ha = adapter block pointer. 3668 * pkt = entry pointer. 3669 * done_q = done queue. 3670 */ 3671static void 3672qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, 3673 struct list_head *done_q) 3674{ 3675 unsigned int bus, target, lun; 3676 int sense_sz; 3677 struct srb *sp; 3678 struct scsi_cmnd *cmd; 3679 uint32_t handle = le32_to_cpu(pkt->handle); 3680 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status); 3681 uint16_t comp_status = le16_to_cpu(pkt->comp_status); 3682 3683 ENTER("qla1280_status_entry"); 3684 3685 /* Validate handle. 
	 */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[handle];
	else
		sp = NULL;

	if (!sp) {
		printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
		goto out;
	}

	/* Free outstanding command slot. */
	ha->outstanding_cmds[handle] = NULL;

	cmd = sp->cmd;

	/* Generate LU queue on cntrl, target, LUN */
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	if (comp_status || scsi_status) {
		dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
			"0x%x, handle = 0x%x\n", comp_status,
			scsi_status, handle);
	}

	/* Target busy or queue full */
	if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
	    (scsi_status & 0xFF) == SAM_STAT_BUSY) {
		CMD_RESULT(cmd) = scsi_status & 0xff;
	} else {

		/* Save ISP completion status */
		CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);

		if (scsi_status & SAM_STAT_CHECK_CONDITION) {
			if (comp_status != CS_ARS_FAILED) {
				uint16_t req_sense_length =
					le16_to_cpu(pkt->req_sense_length);
				if (req_sense_length < CMD_SNSLEN(cmd))
					sense_sz = req_sense_length;
				else
					/*
					 * scsi_cmnd->sense_buffer is
					 * 64 bytes, why only copy 63?
					 * This looks wrong! /Jes
					 */
					sense_sz = CMD_SNSLEN(cmd) - 1;

				memcpy(cmd->sense_buffer,
				       &pkt->req_sense_data, sense_sz);
			} else
				sense_sz = 0;
			memset(cmd->sense_buffer + sense_sz, 0,
			       SCSI_SENSE_BUFFERSIZE - sense_sz);

			dprintk(2, "qla1280_status_entry: Check "
				"condition Sense data, b %i, t %i, "
				"l %i\n", bus, target, lun);
			if (sense_sz)
				qla1280_dump_buffer(2,
						    (char *)cmd->sense_buffer,
						    sense_sz);
		}
	}

	CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

	/* Place command on done queue. */
	list_add_tail(&sp->list, done_q);
 out:
	LEAVE("qla1280_status_entry");
}

/*
 * qla1280_error_entry
 *	Processes error entry.
 *
 * Input:
 *	ha = adapter block pointer.
 *	pkt = entry pointer.
 *	done_q = done queue.
 */
static void
qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
		    struct list_head *done_q)
{
	struct srb *sp;
	uint32_t handle = le32_to_cpu(pkt->handle);

	ENTER("qla1280_error_entry");

	if (pkt->entry_status & BIT_3)
		dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
	else if (pkt->entry_status & BIT_2)
		dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
	else if (pkt->entry_status & BIT_1)
		dprintk(2, "qla1280_error_entry: FULL flag error\n");
	else
		dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status & (BIT_3 + BIT_2)) {
			/* Bad payload or header, set error status. */
			/* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
			CMD_RESULT(sp->cmd) = DID_ERROR << 16;
		} else if (pkt->entry_status & BIT_1) {	/* FULL flag */
			CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
		} else {
			/* Set error status. */
			CMD_RESULT(sp->cmd) = DID_ERROR << 16;
		}

		CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

		/* Place command on done queue. */
		list_add_tail(&sp->list, done_q);
	}
#ifdef QLA_64BIT_PTR
	else if (pkt->entry_type == COMMAND_A64_TYPE) {
		printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
	}
#endif

	LEAVE("qla1280_error_entry");
}

/*
 * qla1280_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
static int
qla1280_abort_isp(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct srb *sp;
	int status = 0;
	int cnt;
	int bus;

	ENTER("qla1280_abort_isp");

	if (ha->flags.abort_isp_active || !ha->flags.online)
		goto out;

	ha->flags.abort_isp_active = 1;

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);
	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
	RD_REG_WORD(&reg->id_l);

	printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
	       ha->host_no);
	/* Dequeue all commands in outstanding command list. */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		struct scsi_cmnd *cmd;
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;
			CMD_RESULT(cmd) = DID_RESET << 16;
			CMD_HANDLE(cmd) = COMPLETED_HANDLE;
			ha->outstanding_cmds[cnt] = NULL;
			list_add_tail(&sp->list, &ha->done_q);
		}
	}

	qla1280_done(ha);

	status = qla1280_load_firmware(ha);
	if (status)
		goto out;

	/* Setup adapter based on NVRAM parameters. */
	qla1280_nvram_config(ha);

	status = qla1280_init_rings(ha);
	if (status)
		goto out;

	/* Issue SCSI reset. */
	for (bus = 0; bus < ha->ports; bus++)
		qla1280_bus_reset(ha, bus);

	ha->flags.abort_isp_active = 0;
 out:
	if (status) {
		printk(KERN_WARNING
		       "qla1280: ISP error recovery failed, board disabled\n");
		qla1280_reset_adapter(ha);
		dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
	}

	LEAVE("qla1280_abort_isp");
	return status;
}


/*
 * qla1280_debounce_register
 *	Debounce register.
 *
 * Input:
 *	port = register address.
 *
 * Returns:
 *	register value.
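 *
 * Note:
 *	The register is read repeatedly until two consecutive reads return
 *	the same value, so a transient value read while the ISP is still
 *	updating the register is never returned.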
3909 */ 3910static u16 3911qla1280_debounce_register(volatile u16 __iomem * addr) 3912{ 3913 volatile u16 ret; 3914 volatile u16 ret2; 3915 3916 ret = RD_REG_WORD(addr); 3917 ret2 = RD_REG_WORD(addr); 3918 3919 if (ret == ret2) 3920 return ret; 3921 3922 do { 3923 cpu_relax(); 3924 ret = RD_REG_WORD(addr); 3925 ret2 = RD_REG_WORD(addr); 3926 } while (ret != ret2); 3927 3928 return ret; 3929} 3930 3931 3932/************************************************************************ 3933 * qla1280_check_for_dead_scsi_bus * 3934 * * 3935 * This routine checks for a dead SCSI bus * 3936 ************************************************************************/ 3937#define SET_SXP_BANK 0x0100 3938#define SCSI_PHASE_INVALID 0x87FF 3939static int 3940qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) 3941{ 3942 uint16_t config_reg, scsi_control; 3943 struct device_reg __iomem *reg = ha->iobase; 3944 3945 if (ha->bus_settings[bus].scsi_bus_dead) { 3946 WRT_REG_WORD(®->host_cmd, HC_PAUSE_RISC); 3947 config_reg = RD_REG_WORD(®->cfg_1); 3948 WRT_REG_WORD(®->cfg_1, SET_SXP_BANK); 3949 scsi_control = RD_REG_WORD(®->scsiControlPins); 3950 WRT_REG_WORD(®->cfg_1, config_reg); 3951 WRT_REG_WORD(®->host_cmd, HC_RELEASE_RISC); 3952 3953 if (scsi_control == SCSI_PHASE_INVALID) { 3954 ha->bus_settings[bus].scsi_bus_dead = 1; 3955 return 1; /* bus is dead */ 3956 } else { 3957 ha->bus_settings[bus].scsi_bus_dead = 0; 3958 ha->bus_settings[bus].failed_reset_count = 0; 3959 } 3960 } 3961 return 0; /* bus is not dead */ 3962} 3963 3964static void 3965qla1280_get_target_parameters(struct scsi_qla_host *ha, 3966 struct scsi_device *device) 3967{ 3968 uint16_t mb[MAILBOX_REGISTER_COUNT]; 3969 int bus, target, lun; 3970 3971 bus = device->channel; 3972 target = device->id; 3973 lun = device->lun; 3974 3975 3976 mb[0] = MBC_GET_TARGET_PARAMETERS; 3977 mb[1] = (uint16_t) (bus ? 
	mb[1] <<= 8;
	qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
				&mb[0]);

	printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);

	if (mb[3] != 0) {
		printk(" Sync: period %d, offset %d",
		       (mb[3] & 0xff), (mb[3] >> 8));
		if (mb[2] & BIT_13)
			printk(", Wide");
		if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
			printk(", DT");
	} else
		printk(" Async");

	if (device->simple_tags)
		printk(", Tagged queuing: depth %d", device->queue_depth);
	printk("\n");
}


#if DEBUG_QLA1280
static void
__qla1280_dump_buffer(char *b, int size)
{
	int cnt;
	u8 c;

	printk(KERN_DEBUG " 0   1   2   3   4   5   6   7   8   9   Ah  "
	       "Bh  Ch  Dh  Eh  Fh\n");
	printk(KERN_DEBUG "---------------------------------------------"
	       "------------------\n");

	for (cnt = 0; cnt < size;) {
		c = *b++;

		printk("0x%02x", c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk(" ");
	}
	if (cnt % 16)
		printk("\n");
}

/**************************************************************************
 *   ql1280_print_scsi_cmd
 *
 **************************************************************************/
static void
__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha;
	struct Scsi_Host *host = CMD_HOST(cmd);
	struct srb *sp;
	/* struct scatterlist *sg; */

	int i;
	ha = (struct scsi_qla_host *)host->hostdata;

	sp = (struct srb *)CMD_SP(cmd);
	printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
	printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
	       SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
	       CMD_CDBLEN(cmd));
	printk(" CDB = ");
	for (i = 0; i < cmd->cmd_len; i++) {
		printk("0x%02x ", cmd->cmnd[i]);
	}
	printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
	printk(" request buffer=0x%p, request buffer len=0x%x\n",
	       scsi_sglist(cmd), scsi_bufflen(cmd));
	/* if (cmd->use_sg)
	   {
	   sg = (struct scatterlist *) cmd->request_buffer;
	   printk(" SG buffer: \n");
	   qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
	   } */
	printk(" tag=%d, transfersize=0x%x \n",
	       cmd->tag, cmd->transfersize);
	printk(" SP=0x%p\n", CMD_SP(cmd));
	printk(" underflow size = 0x%x, direction=0x%x\n",
	       cmd->underflow, cmd->sc_data_direction);
}

/**************************************************************************
 *   ql1280_dump_device
 *
 **************************************************************************/
static void
ql1280_dump_device(struct scsi_qla_host *ha)
{

	struct scsi_cmnd *cp;
	struct srb *sp;
	int i;

	printk(KERN_DEBUG "Outstanding Commands on controller:\n");

	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if ((sp = ha->outstanding_cmds[i]) == NULL)
			continue;
		if ((cp = sp->cmd) == NULL)
			continue;
		qla1280_print_scsi_cmd(1, cp);
	}
}
#endif


enum tokens {
	TOKEN_NVRAM,
	TOKEN_SYNC,
	TOKEN_WIDE,
	TOKEN_PPR,
	TOKEN_VERBOSE,
	TOKEN_DEBUG,
};

struct setup_tokens {
	char *token;
	int val;
};

static struct setup_tokens setup_token[] __initdata =
{
	{ "nvram", TOKEN_NVRAM },
	{ "sync", TOKEN_SYNC },
	{ "wide", TOKEN_WIDE },
	{ "ppr", TOKEN_PPR },
	{ "verbose", TOKEN_VERBOSE },
	{ "debug", TOKEN_DEBUG },
};


/**************************************************************************
 *   qla1280_setup
 *
 *   Handle boot parameters. This really needs to be changed so one
 *   can specify per adapter parameters.
 **************************************************************************/
static int __init
qla1280_setup(char *s)
{
	char *cp, *ptr;
	unsigned long val;
	int toke;

	cp = s;

	while (cp && (ptr = strchr(cp, ':'))) {
		ptr++;
		if (!strcmp(ptr, "yes")) {
			val = 0x10000;
			ptr += 3;
		} else if (!strcmp(ptr, "no")) {
			val = 0;
			ptr += 2;
		} else
			val = simple_strtoul(ptr, &ptr, 0);

		switch ((toke = qla1280_get_token(cp))) {
		case TOKEN_NVRAM:
			if (!val)
				driver_setup.no_nvram = 1;
			break;
		case TOKEN_SYNC:
			if (!val)
				driver_setup.no_sync = 1;
			else if (val != 0x10000)
				driver_setup.sync_mask = val;
			break;
		case TOKEN_WIDE:
			if (!val)
				driver_setup.no_wide = 1;
			else if (val != 0x10000)
				driver_setup.wide_mask = val;
			break;
		case TOKEN_PPR:
			if (!val)
				driver_setup.no_ppr = 1;
			else if (val != 0x10000)
				driver_setup.ppr_mask = val;
			break;
		case TOKEN_VERBOSE:
			qla1280_verbose = val;
			break;
		default:
			printk(KERN_INFO "qla1280: unknown boot option %s\n",
			       cp);
		}

		cp = strchr(ptr, ';');
		if (cp)
			cp++;
		else {
			break;
		}
	}
	return 1;
}


static int __init
qla1280_get_token(char *str)
{
	char *sep;
	long ret = -1;
	int i;

	sep = strchr(str, ':');

	if (sep) {
		for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
			if (!strncmp(setup_token[i].token, str, (sep - str))) {
				ret = setup_token[i].val;
				break;
			}
		}
	}

	return ret;
}


static struct scsi_host_template qla1280_driver_template = {
	.module			= THIS_MODULE,
	.proc_name		= "qla1280",
	.name			= "Qlogic ISP 1280/12160",
	.info			= qla1280_info,
	.slave_configure	= qla1280_slave_configure,
	.queuecommand		= qla1280_queuecommand,
	.eh_abort_handler	= qla1280_eh_abort,
	.eh_device_reset_handler= qla1280_eh_device_reset,
	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
	.bios_param		= qla1280_biosparam,
	.can_queue		= 0xfffff,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};


static int
qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int devnum = id->driver_data;
	struct qla_boards *bdp = &ql1280_board_tbl[devnum];
	struct Scsi_Host *host;
	struct scsi_qla_host *ha;
	int error = -ENODEV;

	/* Bypass all AMI SUBSYS VENDOR IDs */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
		printk(KERN_INFO
		       "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
		goto error;
	}

	printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
	       bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));

	if (pci_enable_device(pdev)) {
		printk(KERN_WARNING
		       "qla1280: Failed to enable PCI device, aborting.\n");
		goto error;
	}

	pci_set_master(pdev);

	error = -ENOMEM;
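	/*
	 * scsi_host_alloc() allocates sizeof(*ha) bytes of driver-private
	 * data along with the Scsi_Host; host->hostdata below points at
	 * that area and is used as our struct scsi_qla_host.
	 */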
	host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
	if (!host) {
		printk(KERN_WARNING
		       "qla1280: Failed to register host, aborting.\n");
		goto error_disable_device;
	}

	ha = (struct scsi_qla_host *)host->hostdata;
	memset(ha, 0, sizeof(struct scsi_qla_host));

	ha->pdev = pdev;
	ha->devnum = devnum;	/* specifies microcode load address */

#ifdef QLA_64BIT_PTR
	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
			printk(KERN_WARNING "scsi(%li): Unable to set a "
			       "suitable DMA mask - aborting\n", ha->host_no);
			error = -ENODEV;
			goto error_put_host;
		}
	} else
		dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
			ha->host_no);
#else
	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
		printk(KERN_WARNING "scsi(%li): Unable to set a "
		       "suitable DMA mask - aborting\n", ha->host_no);
		error = -ENODEV;
		goto error_put_host;
	}
#endif

	ha->request_ring = pci_alloc_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			&ha->request_dma);
	if (!ha->request_ring) {
		printk(KERN_INFO "qla1280: Failed to get request memory\n");
		goto error_put_host;
	}

	ha->response_ring = pci_alloc_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			&ha->response_dma);
	if (!ha->response_ring) {
		printk(KERN_INFO "qla1280: Failed to get response memory\n");
		goto error_free_request_ring;
	}

	ha->ports = bdp->numPorts;

	ha->host = host;
	ha->host_no = host->host_no;

	host->irq = pdev->irq;
	host->max_channel = bdp->numPorts - 1;
	host->max_lun = MAX_LUNS - 1;
	host->max_id = MAX_TARGETS;
	host->max_sectors = 1024;
	host->unique_id = host->host_no;

	error = -ENODEV;

#if MEMORY_MAPPED_IO
	ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
	if (!ha->mmpbase) {
		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
		goto error_free_response_ring;
	}

	host->base = (unsigned long)ha->mmpbase;
	ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
	host->io_port = pci_resource_start(ha->pdev, 0);
	if (!request_region(host->io_port, 0xff, "qla1280")) {
		printk(KERN_INFO "qla1280: Failed to reserve i/o region "
		       "0x%04lx-0x%04lx - already in use\n",
		       host->io_port, host->io_port + 0xff);
		goto error_free_response_ring;
	}

	ha->iobase = (struct device_reg *)host->io_port;
#endif

	INIT_LIST_HEAD(&ha->done_q);

	/* Disable ISP interrupts.
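	 * Keep them off until the interrupt handler has been registered
	 * below and the adapter has been initialized.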
	 */
	qla1280_disable_intrs(ha);

	if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
			"qla1280", ha)) {
		printk(KERN_WARNING "qla1280: Failed to reserve interrupt %d, "
		       "already in use\n", pdev->irq);
		goto error_release_region;
	}

	/* load the F/W, read parameters, and init the H/W */
	if (qla1280_initialize_adapter(ha)) {
		printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
		goto error_free_irq;
	}

	/* set our host ID (need to do something about our two IDs) */
	host->this_id = ha->bus_settings[0].id;

	pci_set_drvdata(pdev, host);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto error_disable_adapter;
	scsi_scan_host(host);

	return 0;

 error_disable_adapter:
	qla1280_disable_intrs(ha);
 error_free_irq:
	free_irq(pdev->irq, ha);
 error_release_region:
#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif
 error_free_response_ring:
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			ha->response_ring, ha->response_dma);
 error_free_request_ring:
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			ha->request_ring, ha->request_dma);
 error_put_host:
	scsi_host_put(host);
 error_disable_device:
	pci_disable_device(pdev);
 error:
	return error;
}


static void
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	scsi_remove_host(host);

	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	scsi_host_put(host);
}

static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= qla1280_remove_one,
};

static int __init
qla1280_init(void)
{
	if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return -EINVAL;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may not be null
	 * and it would point to our bootup string, just like on the lilo
	 * command line. If not NULL, then process this config string with
	 * qla1280_setup
	 *
	 * Boot time Options
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
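	 *
	 * Note that the qla1280_setup() parser above only acts on the
	 * options nvram, sync, wide, ppr and verbose, given as ';'-separated
	 * "option:value" pairs where value is "yes", "no" or a number, e.g.:
	 *
	 *	append="qla1280=verbose:1;nvram:no"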
4453 */ 4454 if (qla1280) 4455 qla1280_setup(qla1280); 4456#endif 4457 4458 return pci_register_driver(&qla1280_pci_driver); 4459} 4460 4461static void __exit 4462qla1280_exit(void) 4463{ 4464 int i; 4465 4466 pci_unregister_driver(&qla1280_pci_driver); 4467 /* release any allocated firmware images */ 4468 for (i = 0; i < QL_NUM_FW_IMAGES; i++) { 4469 release_firmware(qla1280_fw_tbl[i].fw); 4470 qla1280_fw_tbl[i].fw = NULL; 4471 } 4472} 4473 4474module_init(qla1280_init); 4475module_exit(qla1280_exit); 4476 4477MODULE_AUTHOR("Qlogic & Jes Sorensen"); 4478MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver"); 4479MODULE_LICENSE("GPL"); 4480MODULE_FIRMWARE("qlogic/1040.bin"); 4481MODULE_FIRMWARE("qlogic/1280.bin"); 4482MODULE_FIRMWARE("qlogic/12160.bin"); 4483MODULE_VERSION(QLA1280_VERSION); 4484 4485/* 4486 * Overrides for Emacs so that we almost follow Linus's tabbing style. 4487 * Emacs will notice this stuff at the end of the file and automatically 4488 * adjust the settings for this buffer only. This must remain at the end 4489 * of the file. 4490 * --------------------------------------------------------------------------- 4491 * Local variables: 4492 * c-basic-offset: 8 4493 * tab-width: 8 4494 * End: 4495 */ 4496