
i40e_adminq.c revision af28eec9f56742aa83ae6fd81fcf50ec981dca28
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (hw->mac.type == I40E_MAC_VF) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.asq.desc = hw->aq.asq_mem.va;
	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;

	ret_code = i40e_allocate_virt_mem(hw, &mem,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
		hw->aq.asq_mem.va = NULL;
		hw->aq.asq_mem.pa = 0;
		return ret_code;
	}

	hw->aq.asq.details = mem.va;

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	hw->aq.arq.desc = hw->aq.arq_mem.va;
	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;

	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
	hw->aq.asq_mem.va = NULL;
	hw->aq.asq_mem.pa = 0;
	mem.va = hw->aq.asq.details;
	i40e_free_virt_mem(hw, &mem);
	hw->aq.asq.details = NULL;
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
	hw->aq.arq_mem.va = NULL;
	hw->aq.arq_mem.pa = 0;
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw:     pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw:     pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_virt_mem mem;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
					  sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw:     pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	mem.va = hw->aq.arq.r.arq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw:     pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	struct i40e_virt_mem mem;
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* now free the buffer info list */
	mem.va = hw->aq.asq.r.asq_bi;
	i40e_free_virt_mem(hw, &mem);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw:     pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the transmit queue */
		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	} else {
		/* configure the transmit queue */
		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	}
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw:     pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
	if (hw->mac.type == I40E_MAC_VF) {
		/* configure the receive queue */
		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	} else {
		/* configure the receive queue */
		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	}

	/* Update tail in the HW to post pre-allocated buffers */
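	/* Writing count - 1 rather than count leaves one descriptor slot
	 * unposted, so head == tail stays unambiguous as the "ring empty"
	 * condition for the hardware.
	 */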
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw:     pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_asq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw:     pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	i40e_config_arq_regs(hw);

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw:     pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.asq_mutex);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_asq(hw);

	mutex_unlock(&hw->aq.asq_mutex);

	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw:     pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);

	/* make sure lock is available */
	mutex_lock(&hw->aq.arq_mutex);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);
	/* free the ring descriptors */
	i40e_free_adminq_arq(hw);

	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw:     pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code;
	u16 eetrack_lo, eetrack_hi;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize locks */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}
	i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
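	/* the HMC resource profile request is best-effort at this point;
	 * the return code is deliberately overwritten below so a failure
	 * here cannot abort AdminQ initialization
	 */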
	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

init_adminq_exit:
	return ret_code;
}

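/* Example (illustrative): callers are expected to fill in the queue sizing
 * fields before invoking i40e_init_adminq().  The values shown are arbitrary
 * sane choices, not values mandated by this file.
 *
 *	i40e_status status;
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	status = i40e_init_adminq(hw);
 */
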
/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw:     pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the locks */
	mutex_destroy(&hw->aq.asq_mutex);
	mutex_destroy(&hw->aq.arq_mutex);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
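	/* the firmware advances the head register past each descriptor it
	 * has consumed, so everything between next_to_clean and head can be
	 * recycled once any completion callback has been invoked
	 */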
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	mutex_lock(&hw->aq.asq_mutex);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag.\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;
		u32 delay_len = 10;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			udelay(delay_len);
			total_delay += delay_len;
		} while (total_delay < I40E_ASQ_CMD_TIMEOUT);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);
			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
}

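/* Example (illustrative): a typical direct (bufferless) command round-trip
 * using the two routines above.  The opcode comes from i40e_adminq_cmd.h;
 * the error handling is only a sketch.
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status)
 *		return status;
 */
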
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	/* set next_to_use to head */
	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Queue is empty.\n");
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;
	i40e_debug_aq(hw,
		      I40E_DEBUG_AQ_COMMAND,
		      (void *)desc,
		      hw->aq.arq.r.arq_bi[desc_idx].va);

	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	} else {
		memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
		datalen = le16_to_cpu(desc->datalen);
		e->msg_size = min(datalen, e->msg_size);
		if (e->msg_buf != NULL && (e->msg_size != 0))
			memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
			       e->msg_size);
	}

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
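	/* pending = descriptors the firmware has produced but we have not
	 * yet cleaned; the (ntc > ntu) term accounts for ring wraparound
	 */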
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
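
/* Example (illustrative): draining the ARQ, e.g. after an AdminQ interrupt.
 * msg_buf below is an assumed caller-owned scratch buffer; msg_size must be
 * reset to the buffer capacity before each call because the routine
 * overwrites it with the actual event length.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.msg_buf = my_buf;
 *	do {
 *		event.msg_size = hw->aq.arq_buf_size;
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;
 *		... dispatch on le16_to_cpu(event.desc.opcode) ...
 *	} while (pending);
 */

/**
 *  i40e_resume_aq - resume AdminQ processing after a reset
 *  @hw: pointer to the hardware structure
 *
 *  Reset the driver's ring state and re-program the queue base/length
 *  registers, which are cleared by a PF reset.
 **/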
static void i40e_resume_aq(struct i40e_hw *hw)
{
	u32 reg = 0;

	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);
	reg = hw->aq.num_asq_entries | I40E_PF_ATQLEN_ATQENABLE_MASK;
	wr32(hw, hw->aq.asq.len, reg);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
	reg = hw->aq.num_arq_entries | I40E_PF_ARQLEN_ARQENABLE_MASK;
	wr32(hw, hw->aq.arq.len, reg);
}
987