/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#define INT_PCI_MSI_NR (8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX2_CTRL			0x128
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_VEND_XP	0x00000F00
#define  RP_VEND_XP_DL_UP	(1 << 30)

#define RP_PRIV_MISC	0x00000FE0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000B8
#define PADS_PLL_CTL_TEGRA30			0x000000B4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000C8
#define PADS_REFCLK_CFG1			0x000000CC
#define PADS_REFCLK_BIAS			0x000000D0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */

/* Default value provided by HW engineering is 0xfa5c */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT)   | \
		(0    << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa  << PADS_REFCLK_CFG_PREDI_SHIFT)  | \
		(0xf  << PADS_REFCLK_CFG_DRVI_SHIFT)     \
	)

/* Per-controller MSI state: one parent interrupt fans out to 8 * 32 vectors */
struct tegra_msi {
	struct msi_chip chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* vectors currently allocated */
	struct irq_domain *domain;
	unsigned long pages;		/* backing pages for the MSI target address */
	struct mutex lock;		/* serializes allocation from "used" */
	int irq;			/* parent interrupt shared by all vectors */
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;	/* alignment shift for the MSI target address */
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register */
	u32 tx_ref_sel;			/* TX clock reference divider selection */
	bool has_pex_clkreq_en;		/* CLKREQ# enable bit available in AFI_PEX_CTRL */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register present */
	bool has_intr_prsnt_sense;	/* present-sense interrupt supported */
	bool has_cml_clk;		/* separate CML clock must be enabled */
	bool has_gen2;			/* controller supports PCIe Gen2 */
};

/* Resolve the embedded msi_chip back to its containing tegra_msi. */
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

/* Driver-private state for one Tegra PCIe host controller instance */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PHY pad control registers */
	void __iomem *afi;	/* AXI-to-FPCI bridge registers */
	int irq;		/* controller error/legacy interrupt */

	struct list_head buses;	/* cached per-bus config-space mappings */
	struct resource *cs;	/* configuration space aperture */

	struct resource all;	/* whole aperture owned by the controller */
	struct resource io;	/* downstream I/O aperture (bus side) */
	struct resource pio;	/* CPU-side window remapped onto "io" */
	struct resource mem;	/* non-prefetchable memory aperture */
	struct resource prefetch;	/* prefetchable memory aperture */
	struct resource busn;	/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only used when soc_data->has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct phy *phy;	/* optional external PHY; NULL means pad-based setup */

	struct tegra_msi msi;

	struct list_head ports;	/* enabled tegra_pcie_port instances */
	unsigned int num_ports;
	u32 xbar_config;	/* lane crossbar value for AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc_data *soc_data;
	struct dentry *debugfs;
};

/* One PCIe root port of the controller */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* port number, selects AFI_PEXn_CTRL */
	unsigned int lanes;		/* number of lanes assigned to this port */
};

/* Cached virtual mapping of the config space for one bus number */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* 1 MiB virtual area (see tegra_pcie_bus_alloc) */
	struct list_head list;	/* entry in tegra_pcie.buses */
	unsigned int nr;	/* bus number this mapping covers */
};

/* Fetch the tegra_pcie instance stashed in the ARM pci_sys_data. */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

/* Write a 32-bit value to an AFI register at the given offset. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

/* Read a 32-bit AFI register at the given offset. */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

/* Write a 32-bit value to a PADS (PHY pad control) register. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

/* Read a 32-bit PADS (PHY pad control) register. */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, 1 MiB of virtual addresses is allocated per bus when
 * the bus is first accessed. When the physical range is mapped, the bus
 * number bits are hidden so that the extended register number bits appear
 * as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register bits */
	offset |= PCI_SLOT(devfn) << 11;	/* device number */
	offset |= PCI_FUNC(devfn) << 8;		/* function number */
	offset |= where & 0xfc;			/* dword-aligned register */

	return offset;
}

/*
 * Build the per-bus configuration space mapping: 1 MiB of virtual address
 * space stitched together from 16 chunks of 64 KiB of physical space, so
 * that the extended register number lands in bits [19:16] (see the comment
 * above tegra_pcie_conf_offset()). Returns the new mapping or an ERR_PTR.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* strongly-ordered, non-executable device mapping attributes */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/* chunks sit 16 MiB apart: one per extended register value */
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}

/*
 * Look up a virtual address mapping for the specified bus number. If no such
 * mapping exists, try to create one. Returns NULL when the mapping cannot
 * be created.
 */
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
					unsigned int busnr)
{
	struct tegra_pcie_bus *bus;

	/* fast path: reuse a mapping created on a previous access */
	list_for_each_entry(bus, &pcie->buses, list)
		if (bus->nr == busnr)
			return (void __iomem *)bus->area->addr;

	bus = tegra_pcie_bus_alloc(pcie, busnr);
	if (IS_ERR(bus))
		return NULL;

	/* cache it for subsequent accesses to the same bus */
	list_add_tail(&bus->list, &pcie->buses);

	return (void __iomem *)bus->area->addr;
}

/*
 * Translate (bus, devfn, where) into a CPU virtual address for config
 * accesses. Accesses to the root bus go directly to the matching root
 * port's register window (slot N maps to port index N - 1); everything
 * else goes through the per-bus ECAM-like mapping. Returns NULL if no
 * device/mapping exists.
 */
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}

483static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
484				int where, int size, u32 *value)
485{
486	void __iomem *addr;
487
488	addr = tegra_pcie_conf_address(bus, devfn, where);
489	if (!addr) {
490		*value = 0xffffffff;
491		return PCIBIOS_DEVICE_NOT_FOUND;
492	}
493
494	*value = readl(addr);
495
496	if (size == 1)
497		*value = (*value >> (8 * (where & 3))) & 0xff;
498	else if (size == 2)
499		*value = (*value >> (8 * (where & 3))) & 0xffff;
500
501	return PCIBIOS_SUCCESSFUL;
502}
503
504static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
505				 int where, int size, u32 value)
506{
507	void __iomem *addr;
508	u32 mask, tmp;
509
510	addr = tegra_pcie_conf_address(bus, devfn, where);
511	if (!addr)
512		return PCIBIOS_DEVICE_NOT_FOUND;
513
514	if (size == 4) {
515		writel(value, addr);
516		return PCIBIOS_SUCCESSFUL;
517	}
518
519	if (size == 2)
520		mask = ~(0xffff << ((where & 0x3) * 8));
521	else if (size == 1)
522		mask = ~(0xff << ((where & 0x3) * 8));
523	else
524		return PCIBIOS_BAD_REGISTER_NUMBER;
525
526	tmp = readl(addr) & mask;
527	tmp |= value << ((where & 0x3) * 8);
528	writel(tmp, addr);
529
530	return PCIBIOS_SUCCESSFUL;
531}
532
/* Config-space accessors handed to the PCI core for all buses */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};

538static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
539{
540	unsigned long ret = 0;
541
542	switch (port->index) {
543	case 0:
544		ret = AFI_PEX0_CTRL;
545		break;
546
547	case 1:
548		ret = AFI_PEX1_CTRL;
549		break;
550
551	case 2:
552		ret = AFI_PEX2_CTRL;
553		break;
554	}
555
556	return ret;
557}
558
/* Pulse the root port's PERST# via AFI_PEX_CTRL_RST (active low). */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* hold reset asserted for at least 1 ms */
	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}

/* Enable the port's reference clock (and CLKREQ# where supported), then
 * pulse its reset to start link training.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}

/* Hold the port in reset and gate its reference clock. */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}

/* Release a port's register mapping and memory and drop it from the list. */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}

/*
 * Fixup: force I/O, memory, bus mastering and SERR on for every bridge so
 * transactions can flow through the hierarchy.
 */
static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* pretend to be a standard PCI-to-PCI bridge */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering enable bit in the Device Control register */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

658static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
659{
660	struct tegra_pcie *pcie = sys_to_pcie(sys);
661	int err;
662
663	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664	if (err < 0)
665		return err;
666
667	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
668	if (err)
669		return err;
670
671	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
672	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
673				sys->mem_offset);
674	pci_add_resource(&sys->resources, &pcie->busn);
675
676	pci_ioremap_io(pcie->pio.start, pcie->io.start);
677
678	return 1;
679}
680
/*
 * hw_pci.map_irq callback: resolve a device's legacy INTx to a Linux IRQ
 * via the device tree, falling back to the controller's own interrupt.
 */
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
	int irq;

	/* tell the cpuidle driver PCIe interrupts are now in use */
	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

/* Attach the controller's MSI chip to every newly created bus. */
static void tegra_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);

		bus->msi = &pcie->msi.chip;
	}
}

/*
 * hw_pci.scan callback: create the root bus with this driver's config
 * accessors and enumerate everything below it.
 */
static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct pci_bus *bus;

	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
				  &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	return bus;
}

719static irqreturn_t tegra_pcie_isr(int irq, void *arg)
720{
721	const char *err_msg[] = {
722		"Unknown",
723		"AXI slave error",
724		"AXI decode error",
725		"Target abort",
726		"Master abort",
727		"Invalid write",
728		"Legacy interrupt",
729		"Response decoding error",
730		"AXI response decoding error",
731		"Transaction timeout",
732		"Slot present pin change",
733		"Slot clock request change",
734		"TMS clock ramp change",
735		"TMS ready for power down",
736		"Peer2Peer error",
737	};
738	struct tegra_pcie *pcie = arg;
739	u32 code, signature;
740
741	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
742	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
743	afi_writel(pcie, 0, AFI_INTR_CODE);
744
745	if (code == AFI_INTR_LEGACY)
746		return IRQ_NONE;
747
748	if (code >= ARRAY_SIZE(err_msg))
749		code = 0;
750
751	/*
752	 * do not pollute kernel log with master abort reports since they
753	 * happen a lot during enumeration
754	 */
755	if (code == AFI_INTR_MASTER_ABORT)
756		dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
757			signature);
758	else
759		dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
760			signature);
761
762	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
763	    code == AFI_INTR_FPCI_DECODE_ERROR) {
764		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
765		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
766
767		if (code == AFI_INTR_MASTER_ABORT)
768			dev_dbg(pcie->dev, "  FPCI address: %10llx\n", address);
769		else
770			dev_err(pcie->dev, "  FPCI address: %10llx\n", address);
771	}
772
773	return IRQ_HANDLED;
774}
775
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is cleared a second time here; looks
	 * redundant -- confirm against the TRM before removing. */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

/*
 * Poll the PADS PLL lock-detect bit until it sets or @timeout (in
 * milliseconds) expires. Returns 0 on lock, -ETIMEDOUT otherwise.
 *
 * NOTE(review): this is a tight busy-wait with no cpu_relax()/sleep
 * between polls -- acceptable only because lock is expected quickly;
 * consider relaxing the loop.
 */
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;

	/* reuse the parameter as the jiffies deadline */
	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

/*
 * Bring up the built-in (pad-based) PCIe PHY: select pad inputs, program
 * the PLL, pulse the PLL reset, program the refclk drivers, wait for PLL
 * lock and finally enable the data lanes. Used when no external PHY is
 * provided. Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset  */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

/*
 * Program the AFI bridge and bring the controller out of reset: select the
 * lane crossbar configuration, enable only the ports described in DT,
 * power up the PHY (external or pad-based), enable the FPCI interface and
 * unmask error interrupts. Returns 0 on success or a negative error code.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* re-enable just the ports that were found in the device tree */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* use the external PHY when one was provided, pads otherwise */
	if (!pcie->phy)
		err = tegra_pcie_phy_enable(pcie);
	else
		err = phy_power_on(pcie->phy);

	if (err < 0) {
		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}

/*
 * Power down the controller: PHY off, all resets asserted, power domain
 * gated and supplies disabled. Failures are only warned about since there
 * is no way to recover here.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	int err;

	/* TODO: disable and unprepare clocks? */

	err = phy_power_off(pcie->phy);
	if (err < 0)
		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}

/*
 * Power up the controller: start from a known state (resets asserted,
 * power domain off), enable supplies, run the PMC power-up sequence and
 * turn on the AFI/CML/PLLE clocks. Returns 0 on success or a negative
 * error code.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* force a clean starting point before powering up */
	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		/* NOTE(review): failure is only logged, not returned --
		 * presumably deliberate best-effort for always-on supplies;
		 * confirm before changing. */
		dev_err(pcie->dev, "failed to enable regulators: %d\n", err);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	/* NOTE(review): error paths below do not unwind already-enabled
	 * clocks; tegra_pcie_power_off() has a matching TODO. */
	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}

1066static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1067{
1068	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1069
1070	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1071	if (IS_ERR(pcie->pex_clk))
1072		return PTR_ERR(pcie->pex_clk);
1073
1074	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1075	if (IS_ERR(pcie->afi_clk))
1076		return PTR_ERR(pcie->afi_clk);
1077
1078	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1079	if (IS_ERR(pcie->pll_e))
1080		return PTR_ERR(pcie->pll_e);
1081
1082	if (soc->has_cml_clk) {
1083		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1084		if (IS_ERR(pcie->cml_clk))
1085			return PTR_ERR(pcie->cml_clk);
1086	}
1087
1088	return 0;
1089}
1090
1091static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1092{
1093	pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1094	if (IS_ERR(pcie->pex_rst))
1095		return PTR_ERR(pcie->pex_rst);
1096
1097	pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1098	if (IS_ERR(pcie->afi_rst))
1099		return PTR_ERR(pcie->afi_rst);
1100
1101	pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1102	if (IS_ERR(pcie->pcie_xrst))
1103		return PTR_ERR(pcie->pcie_xrst);
1104
1105	return 0;
1106}
1107
1108static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1109{
1110	struct platform_device *pdev = to_platform_device(pcie->dev);
1111	struct resource *pads, *afi, *res;
1112	int err;
1113
1114	err = tegra_pcie_clocks_get(pcie);
1115	if (err) {
1116		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1117		return err;
1118	}
1119
1120	err = tegra_pcie_resets_get(pcie);
1121	if (err) {
1122		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
1123		return err;
1124	}
1125
1126	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
1127	if (IS_ERR(pcie->phy)) {
1128		err = PTR_ERR(pcie->phy);
1129		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
1130		return err;
1131	}
1132
1133	err = phy_init(pcie->phy);
1134	if (err < 0) {
1135		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
1136		return err;
1137	}
1138
1139	err = tegra_pcie_power_on(pcie);
1140	if (err) {
1141		dev_err(&pdev->dev, "failed to power up: %d\n", err);
1142		return err;
1143	}
1144
1145	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1146	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1147	if (IS_ERR(pcie->pads)) {
1148		err = PTR_ERR(pcie->pads);
1149		goto poweroff;
1150	}
1151
1152	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1153	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1154	if (IS_ERR(pcie->afi)) {
1155		err = PTR_ERR(pcie->afi);
1156		goto poweroff;
1157	}
1158
1159	/* request configuration space, but remap later, on demand */
1160	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1161	if (!res) {
1162		err = -EADDRNOTAVAIL;
1163		goto poweroff;
1164	}
1165
1166	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1167					   resource_size(res), res->name);
1168	if (!pcie->cs) {
1169		err = -EADDRNOTAVAIL;
1170		goto poweroff;
1171	}
1172
1173	/* request interrupt */
1174	err = platform_get_irq_byname(pdev, "intr");
1175	if (err < 0) {
1176		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1177		goto poweroff;
1178	}
1179
1180	pcie->irq = err;
1181
1182	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1183	if (err) {
1184		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
1185		goto poweroff;
1186	}
1187
1188	return 0;
1189
1190poweroff:
1191	tegra_pcie_power_off(pcie);
1192	return err;
1193}
1194
1195static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1196{
1197	int err;
1198
1199	if (pcie->irq > 0)
1200		free_irq(pcie->irq, pcie);
1201
1202	tegra_pcie_power_off(pcie);
1203
1204	err = phy_exit(pcie->phy);
1205	if (err < 0)
1206		dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
1207
1208	return 0;
1209}
1210
1211static int tegra_msi_alloc(struct tegra_msi *chip)
1212{
1213	int msi;
1214
1215	mutex_lock(&chip->lock);
1216
1217	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1218	if (msi < INT_PCI_MSI_NR)
1219		set_bit(msi, chip->used);
1220	else
1221		msi = -ENOSPC;
1222
1223	mutex_unlock(&chip->lock);
1224
1225	return msi;
1226}
1227
1228static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1229{
1230	struct device *dev = chip->chip.dev;
1231
1232	mutex_lock(&chip->lock);
1233
1234	if (!test_bit(irq, chip->used))
1235		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1236	else
1237		clear_bit(irq, chip->used);
1238
1239	mutex_unlock(&chip->lock);
1240}
1241
/*
 * Top-level handler for the MSI interrupt: demultiplexes the eight
 * 32-bit AFI_MSI_VEC registers and dispatches every pending vector to
 * the Linux interrupt mapped in the MSI IRQ domain.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				/* only dispatch vectors we handed out */
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	/* report IRQ_NONE if nothing was pending (possibly spurious) */
	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1282
/*
 * msi_chip .setup_irq callback: allocate a hardware MSI vector, map it
 * into the MSI IRQ domain and program the endpoint's MSI capability.
 *
 * The MSI target address is the page allocated in
 * tegra_pcie_enable_msi(); the message data is the hardware vector
 * number, which is what tegra_pcie_msi_irq() demultiplexes on.
 */
static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
			       struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		/* give the vector back if no Linux IRQ could be mapped */
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	/* associate the descriptor before writing the message */
	irq_set_msi_desc(irq, desc);

	msg.address_lo = virt_to_phys((void *)msi->pages);
	/* 32 bit address only */
	msg.address_hi = 0;
	msg.data = hwirq;

	write_msi_msg(irq, &msg);

	return 0;
}
1312
1313static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1314{
1315	struct tegra_msi *msi = to_tegra_msi(chip);
1316	struct irq_data *d = irq_get_irq_data(irq);
1317	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1318
1319	irq_dispose_mapping(irq);
1320	tegra_msi_free(msi, hwirq);
1321}
1322
/*
 * irq_chip for the per-vector MSI interrupts; masking is done through
 * the generic MSI mask/unmask helpers.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1330
/*
 * IRQ domain .map callback: wire a newly created mapping to the MSI
 * irq_chip and the simple-IRQ flow handler, and tell the cpuidle
 * driver that PCIe interrupts are now in use (affects LP2 entry).
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1342
/* Domain ops for the linear MSI IRQ domain; only .map is needed. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1346
1347static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1348{
1349	struct platform_device *pdev = to_platform_device(pcie->dev);
1350	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1351	struct tegra_msi *msi = &pcie->msi;
1352	unsigned long base;
1353	int err;
1354	u32 reg;
1355
1356	mutex_init(&msi->lock);
1357
1358	msi->chip.dev = pcie->dev;
1359	msi->chip.setup_irq = tegra_msi_setup_irq;
1360	msi->chip.teardown_irq = tegra_msi_teardown_irq;
1361
1362	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1363					    &msi_domain_ops, &msi->chip);
1364	if (!msi->domain) {
1365		dev_err(&pdev->dev, "failed to create IRQ domain\n");
1366		return -ENOMEM;
1367	}
1368
1369	err = platform_get_irq_byname(pdev, "msi");
1370	if (err < 0) {
1371		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1372		goto err;
1373	}
1374
1375	msi->irq = err;
1376
1377	err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1378			  tegra_msi_irq_chip.name, pcie);
1379	if (err < 0) {
1380		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1381		goto err;
1382	}
1383
1384	/* setup AFI/FPCI range */
1385	msi->pages = __get_free_pages(GFP_KERNEL, 0);
1386	base = virt_to_phys((void *)msi->pages);
1387
1388	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1389	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1390	/* this register is in 4K increments */
1391	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1392
1393	/* enable all MSI vectors */
1394	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1395	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1396	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1397	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1398	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1399	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1400	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1401	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1402
1403	/* and unmask the MSI interrupt */
1404	reg = afi_readl(pcie, AFI_INTR_MASK);
1405	reg |= AFI_INTR_MASK_MSI_MASK;
1406	afi_writel(pcie, reg, AFI_INTR_MASK);
1407
1408	return 0;
1409
1410err:
1411	irq_domain_remove(msi->domain);
1412	return err;
1413}
1414
/*
 * Tear down MSI support: mask and disable all vectors in the AFI first
 * so no new interrupts fire, then release the target page, the MSI
 * interrupt, every domain mapping and finally the domain itself.
 * Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	/* release the MSI target page allocated in tegra_pcie_enable_msi() */
	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of any mappings created by tegra_msi_setup_irq() */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1451
1452static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1453				      u32 *xbar)
1454{
1455	struct device_node *np = pcie->dev->of_node;
1456
1457	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1458		switch (lanes) {
1459		case 0x0000104:
1460			dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1461			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1462			return 0;
1463
1464		case 0x0000102:
1465			dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1466			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1467			return 0;
1468		}
1469	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1470		switch (lanes) {
1471		case 0x00000204:
1472			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1473			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1474			return 0;
1475
1476		case 0x00020202:
1477			dev_info(pcie->dev, "2x3 configuration\n");
1478			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1479			return 0;
1480
1481		case 0x00010104:
1482			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1483			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1484			return 0;
1485		}
1486	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1487		switch (lanes) {
1488		case 0x00000004:
1489			dev_info(pcie->dev, "single-mode configuration\n");
1490			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1491			return 0;
1492
1493		case 0x00000202:
1494			dev_info(pcie->dev, "dual-mode configuration\n");
1495			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1496			return 0;
1497		}
1498	}
1499
1500	return -EINVAL;
1501}
1502
1503/*
1504 * Check whether a given set of supplies is available in a device tree node.
1505 * This is used to check whether the new or the legacy device tree bindings
1506 * should be used.
1507 */
1508static bool of_regulator_bulk_available(struct device_node *np,
1509					struct regulator_bulk_data *supplies,
1510					unsigned int num_supplies)
1511{
1512	char property[32];
1513	unsigned int i;
1514
1515	for (i = 0; i < num_supplies; i++) {
1516		snprintf(property, 32, "%s-supply", supplies[i].supply);
1517
1518		if (of_find_property(np, property, NULL) == NULL)
1519			return false;
1520	}
1521
1522	return true;
1523}
1524
1525/*
1526 * Old versions of the device tree binding for this device used a set of power
1527 * supplies that didn't match the hardware inputs. This happened to work for a
1528 * number of cases but is not future proof. However to preserve backwards-
1529 * compatibility with old device trees, this function will try to use the old
1530 * set of supplies.
1531 */
1532static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1533{
1534	struct device_node *np = pcie->dev->of_node;
1535
1536	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1537		pcie->num_supplies = 3;
1538	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1539		pcie->num_supplies = 2;
1540
1541	if (pcie->num_supplies == 0) {
1542		dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1543			np->full_name);
1544		return -ENODEV;
1545	}
1546
1547	pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1548				      sizeof(*pcie->supplies),
1549				      GFP_KERNEL);
1550	if (!pcie->supplies)
1551		return -ENOMEM;
1552
1553	pcie->supplies[0].supply = "pex-clk";
1554	pcie->supplies[1].supply = "vdd";
1555
1556	if (pcie->num_supplies > 2)
1557		pcie->supplies[2].supply = "avdd";
1558
1559	return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1560				       pcie->supplies);
1561}
1562
1563/*
1564 * Obtains the list of regulators required for a particular generation of the
1565 * IP block.
1566 *
1567 * This would've been nice to do simply by providing static tables for use
1568 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1569 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1570 * and either seems to be optional depending on which ports are being used.
1571 */
1572static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1573{
1574	struct device_node *np = pcie->dev->of_node;
1575	unsigned int i = 0;
1576
1577	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1578		pcie->num_supplies = 7;
1579
1580		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1581					      sizeof(*pcie->supplies),
1582					      GFP_KERNEL);
1583		if (!pcie->supplies)
1584			return -ENOMEM;
1585
1586		pcie->supplies[i++].supply = "avddio-pex";
1587		pcie->supplies[i++].supply = "dvddio-pex";
1588		pcie->supplies[i++].supply = "avdd-pex-pll";
1589		pcie->supplies[i++].supply = "hvdd-pex";
1590		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1591		pcie->supplies[i++].supply = "vddio-pex-ctl";
1592		pcie->supplies[i++].supply = "avdd-pll-erefe";
1593	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1594		bool need_pexa = false, need_pexb = false;
1595
1596		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1597		if (lane_mask & 0x0f)
1598			need_pexa = true;
1599
1600		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1601		if (lane_mask & 0x30)
1602			need_pexb = true;
1603
1604		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1605					 (need_pexb ? 2 : 0);
1606
1607		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1608					      sizeof(*pcie->supplies),
1609					      GFP_KERNEL);
1610		if (!pcie->supplies)
1611			return -ENOMEM;
1612
1613		pcie->supplies[i++].supply = "avdd-pex-pll";
1614		pcie->supplies[i++].supply = "hvdd-pex";
1615		pcie->supplies[i++].supply = "vddio-pex-ctl";
1616		pcie->supplies[i++].supply = "avdd-plle";
1617
1618		if (need_pexa) {
1619			pcie->supplies[i++].supply = "avdd-pexa";
1620			pcie->supplies[i++].supply = "vdd-pexa";
1621		}
1622
1623		if (need_pexb) {
1624			pcie->supplies[i++].supply = "avdd-pexb";
1625			pcie->supplies[i++].supply = "vdd-pexb";
1626		}
1627	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1628		pcie->num_supplies = 5;
1629
1630		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1631					      sizeof(*pcie->supplies),
1632					      GFP_KERNEL);
1633		if (!pcie->supplies)
1634			return -ENOMEM;
1635
1636		pcie->supplies[0].supply = "avdd-pex";
1637		pcie->supplies[1].supply = "vdd-pex";
1638		pcie->supplies[2].supply = "avdd-pex-pll";
1639		pcie->supplies[3].supply = "avdd-plle";
1640		pcie->supplies[4].supply = "vddio-pex-clk";
1641	}
1642
1643	if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1644					pcie->num_supplies))
1645		return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1646					       pcie->supplies);
1647
1648	/*
1649	 * If not all regulators are available for this new scheme, assume
1650	 * that the device tree complies with an older version of the device
1651	 * tree binding.
1652	 */
1653	dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1654
1655	devm_kfree(pcie->dev, pcie->supplies);
1656	pcie->num_supplies = 0;
1657
1658	return tegra_pcie_get_legacy_regulators(pcie);
1659}
1660
/*
 * Parse the controller's device tree node: the "ranges" property into
 * the I/O, prefetchable and non-prefetchable apertures, the bus range,
 * and one child node per root port. Also derives the XBAR lane
 * configuration and the set of regulators from the per-port
 * "nvidia,num-lanes" values.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	/* pcie->all accumulates the union of all apertures below */
	memset(&pcie->all, 0, sizeof(pcie->all));
	pcie->all.flags = IORESOURCE_MEM;
	pcie->all.name = np->full_name;
	pcie->all.start = ~0;
	pcie->all.end = 0;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = np->full_name;

			/*
			 * The Tegra PCIe host bridge uses this to program the
			 * mapping of the I/O space to the physical address,
			 * so we override the .start and .end fields here that
			 * of_pci_range_to_resource() converted to I/O space.
			 * We also set the IORESOURCE_MEM type to clarify that
			 * the resource is in the physical memory space.
			 */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "prefetchable";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "non-prefetchable";
			}
			break;
		}

		/* grow pcie->all to cover this aperture */
		if (res.start <= pcie->all.start)
			pcie->all.start = res.start;

		if (res.end >= pcie->all.end)
			pcie->all.end = res.end;
	}

	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
	if (err < 0)
		return err;

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		/* no bus-range property: fall back to buses 0-255 */
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		/* root ports are numbered by their device number (1-based) */
		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* pack one byte per port, consumed by get_xbar_config() */
		lanes |= value << (index << 3);

		/* disabled ports still occupy lanes in the mask accounting */
		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}
1820
1821/*
1822 * FIXME: If there are no PCIe cards attached, then calling this function
1823 * can result in the increase of the bootup time as there are big timeout
1824 * loops.
1825 */
1826#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
1827static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1828{
1829	unsigned int retries = 3;
1830	unsigned long value;
1831
1832	/* override presence detection */
1833	value = readl(port->base + RP_PRIV_MISC);
1834	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1835	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1836	writel(value, port->base + RP_PRIV_MISC);
1837
1838	do {
1839		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1840
1841		do {
1842			value = readl(port->base + RP_VEND_XP);
1843
1844			if (value & RP_VEND_XP_DL_UP)
1845				break;
1846
1847			usleep_range(1000, 2000);
1848		} while (--timeout);
1849
1850		if (!timeout) {
1851			dev_err(port->pcie->dev, "link %u down, retrying\n",
1852				port->index);
1853			goto retry;
1854		}
1855
1856		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1857
1858		do {
1859			value = readl(port->base + RP_LINK_CONTROL_STATUS);
1860
1861			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1862				return true;
1863
1864			usleep_range(1000, 2000);
1865		} while (--timeout);
1866
1867retry:
1868		tegra_pcie_port_reset(port);
1869	} while (--retries);
1870
1871	return false;
1872}
1873
1874static int tegra_pcie_enable(struct tegra_pcie *pcie)
1875{
1876	struct tegra_pcie_port *port, *tmp;
1877	struct hw_pci hw;
1878
1879	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1880		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1881			 port->index, port->lanes);
1882
1883		tegra_pcie_port_enable(port);
1884
1885		if (tegra_pcie_port_check_link(port))
1886			continue;
1887
1888		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1889
1890		tegra_pcie_port_disable(port);
1891		tegra_pcie_port_free(port);
1892	}
1893
1894	memset(&hw, 0, sizeof(hw));
1895
1896	hw.nr_controllers = 1;
1897	hw.private_data = (void **)&pcie;
1898	hw.setup = tegra_pcie_setup;
1899	hw.map_irq = tegra_pcie_map_irq;
1900	hw.add_bus = tegra_pcie_add_bus;
1901	hw.scan = tegra_pcie_scan_bus;
1902	hw.ops = &tegra_pcie_ops;
1903
1904	pci_common_init_dev(pcie->dev, &hw);
1905
1906	return 0;
1907}
1908
/* Tegra20: 2 root ports, no CML clock, Gen1 only. */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
};
1920
/* Tegra30: 3 root ports, CML clock present, Gen1 only. */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
};
1932
/* Tegra124: 2 root ports, CML clock present, Gen2 capable. */
static const struct tegra_pcie_soc_data tegra124_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
};
1944
/* Compatible strings, newest generation first. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1952
1953static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1954{
1955	struct tegra_pcie *pcie = s->private;
1956
1957	if (list_empty(&pcie->ports))
1958		return NULL;
1959
1960	seq_printf(s, "Index  Status\n");
1961
1962	return seq_list_start(&pcie->ports, *pos);
1963}
1964
/* seq_file .next callback: advance to the next root port in the list. */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}
1971
/* seq_file .stop callback: nothing to release, iteration holds no locks. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
1975
1976static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1977{
1978	bool up = false, active = false;
1979	struct tegra_pcie_port *port;
1980	unsigned int value;
1981
1982	port = list_entry(v, struct tegra_pcie_port, list);
1983
1984	value = readl(port->base + RP_VEND_XP);
1985
1986	if (value & RP_VEND_XP_DL_UP)
1987		up = true;
1988
1989	value = readl(port->base + RP_LINK_CONTROL_STATUS);
1990
1991	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1992		active = true;
1993
1994	seq_printf(s, "%2u     ", port->index);
1995
1996	if (up)
1997		seq_printf(s, "up");
1998
1999	if (active) {
2000		if (up)
2001			seq_printf(s, ", ");
2002
2003		seq_printf(s, "active");
2004	}
2005
2006	seq_printf(s, "\n");
2007	return 0;
2008}
2009
/* seq_file iterator for the debugfs "ports" file. */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2016
2017static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2018{
2019	struct tegra_pcie *pcie = inode->i_private;
2020	struct seq_file *s;
2021	int err;
2022
2023	err = seq_open(file, &tegra_pcie_ports_seq_ops);
2024	if (err)
2025		return err;
2026
2027	s = file->private_data;
2028	s->private = pcie;
2029
2030	return 0;
2031}
2032
/* file_operations for the debugfs "ports" file (read-only seq_file). */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2040
2041static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2042{
2043	struct dentry *file;
2044
2045	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2046	if (!pcie->debugfs)
2047		return -ENOMEM;
2048
2049	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2050				   pcie, &tegra_pcie_ports_ops);
2051	if (!file)
2052		goto remove;
2053
2054	return 0;
2055
2056remove:
2057	debugfs_remove_recursive(pcie->debugfs);
2058	pcie->debugfs = NULL;
2059	return -ENOMEM;
2060}
2061
/*
 * Driver probe: parse the device tree, acquire resources, initialize
 * the controller and AFI translations, optionally enable MSI, then
 * bring up the root ports and register the host bridge. Debugfs
 * failure is logged but not fatal.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	/* debugfs is best-effort; a failure does not abort the probe */
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
				err);
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
2133
/*
 * Platform driver; no .remove is provided, so unbinding via sysfs is
 * suppressed with .suppress_bind_attrs.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
MODULE_LICENSE("GPL v2");
2148