/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"

#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4

struct t4_status_page {
        __be32 rsvd1;   /* flit 0 - hw owns */
        __be16 rsvd2;
        __be16 qid;
        __be16 cidx;
        __be16 pidx;
        u8 qp_err;      /* flit 1 - sw owns */
        u8 db_off;
        u8 pad;
        u16 host_wq_pidx;
        u16 host_cidx;
        u16 host_pidx;
};

#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
                        sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))

static inline int t4_max_fr_depth(int use_dsgl)
{
        return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
}

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4

union t4_wr {
        struct fw_ri_res_wr res;
        struct fw_ri_wr ri;
        struct fw_ri_rdma_write_wr write;
        struct fw_ri_send_wr send;
        struct fw_ri_rdma_read_wr read;
        struct fw_ri_bind_mw_wr bind;
        struct fw_ri_fr_nsmr_wr fr;
        struct fw_ri_inv_lstag_wr inv;
        struct t4_status_page status;
        __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
        struct fw_ri_recv_wr recv;
        struct t4_status_page status;
        __be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
                               enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
        wqe->send.opcode = (u8)opcode;
        wqe->send.flags = flags;
        wqe->send.wrid = wrid;
        wqe->send.r1[0] = 0;
        wqe->send.r1[1] = 0;
        wqe->send.r1[2] = 0;
        wqe->send.len16 = len16;
}
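/*
 * Example (illustrative sketch, not a definition from this header): a
 * post-send path stamps the WR header before ringing the doorbell.  Here
 * "wqe", "qhp" and "len16" are assumed to come from the caller:
 *
 *      init_wr_hdr(wqe, qhp->wq.sq.pidx, FW_RI_SEND_WR,
 *                  FW_RI_COMPLETION_FLAG, len16);
 */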
/* CQE/AE status codes */
#define T4_ERR_SUCCESS                     0x0
#define T4_ERR_STAG                        0x1  /* STAG invalid: out of */
                                                /* range, zero, or */
                                                /* STAG_key mismatch */
#define T4_ERR_PDID                        0x2  /* PDID mismatch */
#define T4_ERR_QPID                        0x3  /* QPID mismatch */
#define T4_ERR_ACCESS                      0x4  /* Invalid access right */
#define T4_ERR_WRAP                        0x5  /* Wrap error */
#define T4_ERR_BOUND                       0x6  /* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR        0x7  /* attempt to invalidate a */
                                                /* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8  /* attempt to invalidate a */
                                                /* MR with a bound MW */
#define T4_ERR_ECC                         0x9  /* ECC error detected */
#define T4_ERR_ECC_PSTAG                   0xA  /* ECC error detected when */
                                                /* reading PSTAG for a MW */
                                                /* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND              0xB  /* pbl addr out of bounds: */
                                                /* software error */
#define T4_ERR_SWFLUSH                     0xC  /* SW FLUSHED */
#define T4_ERR_CRC                         0x10 /* CRC error */
#define T4_ERR_MARKER                      0x11 /* Marker error */
#define T4_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
#define T4_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
#define T4_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
#define T4_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
#define T4_ERR_OPCODE                      0x16 /* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
#define T4_ERR_MSN                         0x18 /* MSN error */
#define T4_ERR_TBIT                        0x19 /* tag bit not set correctly */
#define T4_ERR_MO                          0x1A /* MO not 0 for TERMINATE */
                                                /* or READ_REQ */
#define T4_ERR_MSN_GAP                     0x1B
#define T4_ERR_MSN_RANGE                   0x1C
#define T4_ERR_IRD_OVERFLOW                0x1D
#define T4_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds: */
                                                /* software error */
#define T4_ERR_INTERNAL_ERR                0x1F /* internal error (opcode */
                                                /* mismatch) */
/*
 * CQE defs
 */
struct t4_cqe {
        __be32 header;
        __be32 len;
        union {
                struct {
                        __be32 stag;
                        __be32 msn;
                } rcqe;
                struct {
                        u32 nada1;
                        u16 nada2;
                        u16 cidx;
                } scqe;
                struct {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */

#define S_CQE_QPID    12
#define M_CQE_QPID    0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE    11
#define M_CQE_SWCQE    0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_STATUS    5
#define M_CQE_STATUS    0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE    4
#define M_CQE_TYPE    0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE    0
#define M_CQE_OPCODE    0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

#define SW_CQE(x)     (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x)   (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x)   (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x)    (CQE_TYPE((x)))
#define RQ_TYPE(x)    (!CQE_TYPE((x)))
#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x) ( \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))

#define CQE_LEN(x) (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x)  (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)

/* generic accessor macros */
#define CQE_WRID_HI(x)  (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
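/*
 * Example (illustrative sketch): the S_/M_/V_/G_ macros above compose and
 * crack the big-endian CQE header word.  A software-generated flush CQE,
 * for instance, could be stamped along these lines ("wq" assumed to come
 * from the caller):
 *
 *      cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
 *                               V_CQE_OPCODE(FW_RI_SEND) |
 *                               V_CQE_TYPE(0) |
 *                               V_CQE_SWCQE(1) |
 *                               V_CQE_QPID(wq->sq.qid));
 *
 * and later recognized via SW_CQE(&cqe) and CQE_STATUS(&cqe).
 */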
/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_OVFBIT 62
#define M_CQE_OVFBIT 0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE 60
#define M_CQE_IQTYPE 0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS 0x0fffffffffffffffULL
#define G_CQE_TS(x) ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x)     (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))

struct t4_swsqe {
        u64 wr_id;
        struct t4_cqe cqe;
        int read_len;
        int opcode;
        int complete;
        int signaled;
        u16 idx;
        int flushed;
        struct timespec host_ts;
        u64 sge_ts;
};

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return pgprot_writecombine(prot);
#else
        return pgprot_noncached(prot);
#endif
}

enum {
        T4_SQ_ONCHIP = (1<<0),
};

struct t4_sq {
        union t4_wr *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        unsigned long phys_addr;
        struct t4_swsqe *sw_sq;
        struct t4_swsqe *oldest_read;
        u64 __iomem *udb;
        size_t memsize;
        u32 qid;
        u16 in_use;
        u16 size;
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
        u16 wq_pidx_inc;
        u16 flags;
        short flush_cidx;
};

struct t4_swrqe {
        u64 wr_id;
        struct timespec host_ts;
        u64 sge_ts;
};

struct t4_rq {
        union t4_recv_wr *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        struct t4_swrqe *sw_rq;
        u64 __iomem *udb;
        size_t memsize;
        u32 qid;
        u32 msn;
        u32 rqt_hwaddr;
        u16 rqt_size;
        u16 in_use;
        u16 size;
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
        u16 wq_pidx_inc;
};

struct t4_wq {
        struct t4_sq sq;
        struct t4_rq rq;
        void __iomem *db;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
        int flushed;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
        return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
        return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
        return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
        return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
        wq->rq.in_use++;
        if (++wq->rq.pidx == wq->rq.size)
                wq->rq.pidx = 0;
        wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
                wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
        wq->rq.in_use--;
        wq->rq.msn++;
        if (++wq->rq.cidx == wq->rq.size)
                wq->rq.cidx = 0;
}

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
        return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
        return wq->rq.size * T4_RQ_NUM_SLOTS;
}
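/*
 * Note (illustrative): the queue memory is sized with room for a trailing
 * struct t4_status_page, so queue[size] overlays the status page shared
 * with hardware.  t4_rq_host_wq_pidx() above and t4_wq_in_error() below
 * read through that overlay; done by hand, the same access would be:
 *
 *      struct t4_status_page *sp = &wq->rq.queue[wq->rq.size].status;
 *      u16 hw_pidx = sp->host_wq_pidx;
 */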
static inline int t4_sq_onchip(struct t4_sq *sq)
{
        return sq->flags & T4_SQ_ONCHIP;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
        return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
        return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
        return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
        wq->sq.in_use++;
        if (++wq->sq.pidx == wq->sq.size)
                wq->sq.pidx = 0;
        wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
                wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
        BUG_ON(wq->sq.in_use < 1);
        if (wq->sq.cidx == wq->sq.flush_cidx)
                wq->sq.flush_cidx = -1;
        wq->sq.in_use--;
        if (++wq->sq.cidx == wq->sq.size)
                wq->sq.cidx = 0;
}

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
        return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
        return wq->sq.size * T4_SQ_NUM_SLOTS;
}

/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space.  For coalesced WRs, the SGE fetches data from the FIFO
 * instead of from host memory.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
        int count = 8;

        while (count) {
                writeq(*src, dst);
                src++;
                dst++;
                count--;
        }
}
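/*
 * Illustrative sketch: pio_copy() assumes exactly one 64-byte WR (eight
 * flits).  The T5 fast paths below use it to push the whole WR through
 * the write-combined BAR2 doorbell window when a single WR is posted:
 *
 *      if (inc == 1 && wqe)
 *              pio_copy(wq->sq.udb + 7, (u64 *)wqe);
 */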
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5,
                                 union t4_wr *wqe)
{
        /* Flush host queue memory writes. */
        wmb();
        if (t5) {
                if (inc == 1 && wqe) {
                        PDBG("%s: WC wq->sq.pidx = %d\n",
                             __func__, wq->sq.pidx);
                        pio_copy(wq->sq.udb + 7, (void *)wqe);
                } else {
                        PDBG("%s: DB wq->sq.pidx = %d\n",
                             __func__, wq->sq.pidx);
                        writel(PIDX_T5(inc), wq->sq.udb);
                }

                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
        writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}

static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5,
                                 union t4_recv_wr *wqe)
{
        /* Flush host queue memory writes. */
        wmb();
        if (t5) {
                if (inc == 1 && wqe) {
                        PDBG("%s: WC wq->rq.pidx = %d\n",
                             __func__, wq->rq.pidx);
                        pio_copy(wq->rq.udb + 7, (void *)wqe);
                } else {
                        PDBG("%s: DB wq->rq.pidx = %d\n",
                             __func__, wq->rq.pidx);
                        writel(PIDX_T5(inc), wq->rq.udb);
                }

                /* Flush user doorbell area writes. */
                wmb();
                return;
        }
        writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}

static inline int t4_wq_in_error(struct t4_wq *wq)
{
        return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
        wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
        wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
        wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
        return !wq->rq.queue[wq->rq.size].status.db_off;
}

enum t4_cq_flags {
        CQ_ARMED = 1,
};

struct t4_cq {
        struct t4_cqe *queue;
        dma_addr_t dma_addr;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        struct t4_cqe *sw_queue;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
        u64 ugts;
        size_t memsize;
        __be64 bits_type_ts;
        u32 cqid;
        int vector;
        u16 size; /* including status page */
        u16 cidx;
        u16 sw_pidx;
        u16 sw_cidx;
        u16 sw_in_use;
        u16 cidx_inc;
        u8 gen;
        u8 error;
        unsigned long flags;
};

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
        return test_and_clear_bit(CQ_ARMED, &cq->flags);
}

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
        u32 val;

        set_bit(CQ_ARMED, &cq->flags);
        while (cq->cidx_inc > CIDXINC_MASK) {
                val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
                      INGRESSQID(cq->cqid);
                writel(val, cq->gts);
                cq->cidx_inc -= CIDXINC_MASK;
        }
        val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
              INGRESSQID(cq->cqid);
        writel(val, cq->gts);
        cq->cidx_inc = 0;
        return 0;
}

static inline void t4_swcq_produce(struct t4_cq *cq)
{
        cq->sw_in_use++;
        if (cq->sw_in_use == cq->size) {
                PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
                cq->error = 1;
                BUG_ON(1);
        }
        if (++cq->sw_pidx == cq->size)
                cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
        BUG_ON(cq->sw_in_use < 1);
        cq->sw_in_use--;
        if (++cq->sw_cidx == cq->size)
                cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
        cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
        if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
                u32 val;

                val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
                      INGRESSQID(cq->cqid);
                writel(val, cq->gts);
                cq->cidx_inc = 0;
        }
        if (++cq->cidx == cq->size) {
                cq->cidx = 0;
                cq->gen ^= 1;
        }
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
        return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
        int ret;
        u16 prev_cidx;

        if (cq->cidx == 0)
                prev_cidx = cq->size - 1;
        else
                prev_cidx = cq->cidx - 1;

        if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
                ret = -EOVERFLOW;
                cq->error = 1;
                printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
                BUG_ON(1);
        } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

                /* Ensure the CQE body is not read before the gen bit
                 * check above. */
                rmb();
                *cqe = &cq->queue[cq->cidx];
                ret = 0;
        } else
                ret = -ENODATA;
        return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
        if (cq->sw_in_use == cq->size) {
                PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
                cq->error = 1;
                BUG_ON(1);
                return NULL;
        }
        if (cq->sw_in_use)
                return &cq->sw_queue[cq->sw_cidx];
        return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
        int ret = 0;

        if (cq->error)
                ret = -ENODATA;
        else if (cq->sw_in_use)
                *cqe = &cq->sw_queue[cq->sw_cidx];
        else
                ret = t4_next_hw_cqe(cq, cqe);
        return ret;
}
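/*
 * Typical consumer pattern (illustrative sketch, assuming the caller
 * serializes CQ access as the driver's poll path does): pull the next
 * CQE, then retire it from whichever queue it came from:
 *
 *      struct t4_cqe *hw_cqe;
 *
 *      if (t4_next_cqe(cq, &hw_cqe) == 0) {
 *              ... translate hw_cqe into a work completion ...
 *              if (SW_CQE(hw_cqe))
 *                      t4_swcq_consume(cq);
 *              else
 *                      t4_hwcq_consume(cq);
 *      }
 */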
static inline int t4_cq_in_error(struct t4_cq *cq)
{
        return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
        ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}

struct t4_dev_status_page {
        u8 db_off;
};
#endif