
/*
 * File:	pci-acpi.c
 * Purpose:	Provide PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

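/**
 * acpi_pci_root_get_mcfg_addr - Evaluate _CBA on a PCI host bridge.
 * @handle: ACPI handle of the host bridge.
 *
 * Per the PCI Firmware Specification, _CBA returns the base address of the
 * bridge's memory-mapped configuration space.  Returns 0 if the handle is
 * NULL, the method is absent, or evaluation fails.
 */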
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

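/*
 * _HPX Type 0 ("PCI Setting Record"): after the type and revision fields,
 * a revision 1 record carries four integers (cache line size, latency
 * timer, enable SERR, enable PERR), matching the legacy _HPP settings.
 */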
static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hotplug_params *hpx)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx->t0 = &hpx->type0_data;
		hpx->t0->revision        = revision;
		hpx->t0->cache_line_size = fields[2].integer.value;
		hpx->t0->latency_timer   = fields[3].integer.value;
		hpx->t0->enable_serr     = fields[4].integer.value;
		hpx->t0->enable_perr     = fields[5].integer.value;
		break;
	default:
		printk(KERN_WARNING
		       "%s: Type 0 Revision %d record not supported\n",
		       __func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

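/*
 * _HPX Type 1 ("PCI-X Setting Record"): a revision 1 record carries the
 * maximum memory read byte count, the average maximum outstanding split
 * transactions and the total maximum outstanding split transactions.
 */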
static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hotplug_params *hpx)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx->t1 = &hpx->type1_data;
		hpx->t1->revision      = revision;
		hpx->t1->max_mem_read  = fields[2].integer.value;
		hpx->t1->avg_max_split = fields[3].integer.value;
		hpx->t1->tot_max_split = fields[4].integer.value;
		break;
	default:
		printk(KERN_WARNING
		       "%s: Type 1 Revision %d record not supported\n",
		       __func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

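/*
 * _HPX Type 2 ("PCI Express Setting Record"): a revision 1 record carries
 * AND/OR masks to apply to the AER Uncorrectable Error Mask/Severity,
 * Correctable Error Mask and Advanced Error Capabilities registers, to the
 * Device Control and Link Control registers, and to the secondary (bridge)
 * Uncorrectable Error Severity/Mask registers.
 */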
static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hotplug_params *hpx)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx->t2 = &hpx->type2_data;
		hpx->t2->revision      = revision;
		hpx->t2->unc_err_mask_and      = fields[2].integer.value;
		hpx->t2->unc_err_mask_or       = fields[3].integer.value;
		hpx->t2->unc_err_sever_and     = fields[4].integer.value;
		hpx->t2->unc_err_sever_or      = fields[5].integer.value;
		hpx->t2->cor_err_mask_and      = fields[6].integer.value;
		hpx->t2->cor_err_mask_or       = fields[7].integer.value;
		hpx->t2->adv_err_cap_and       = fields[8].integer.value;
		hpx->t2->adv_err_cap_or        = fields[9].integer.value;
		hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
		hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
		hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
		hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
		hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
		hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
		hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
		break;
	default:
		printk(KERN_WARNING
		       "%s: Type 2 Revision %d record not supported\n",
		       __func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

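/*
 * _HPX returns a package of setting records.  Each record is itself a
 * package whose first two elements are the record type and revision; the
 * decoders above validate and copy the remaining fields.
 */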
static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	u32 type;
	int i;

	/* Clear the output structure first */
	memset(hpx, 0, sizeof(struct hotplug_params));

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			status = decode_type0_hpx_record(record, hpx);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		case 1:
			status = decode_type1_hpx_record(record, hpx);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		case 2:
			status = decode_type2_hpx_record(record, hpx);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			printk(KERN_ERR "%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
 exit:
	kfree(buffer.pointer);
	return status;
}

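/*
 * _HPP is the older interface: it returns a package of exactly four
 * integers (cache line size, latency timer, enable SERR, enable PERR),
 * which are reported here as a Type 0 record with revision 1.
 */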
static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	int i;

	memset(hpp, 0, sizeof(struct hotplug_params));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpp->t0 = &hpp->type0_data;
	hpp->t0->revision        = 1;
	hpp->t0->cache_line_size = fields[0].integer.value;
	hpp->t0->latency_timer   = fields[1].integer.value;
	hpp->t0->enable_serr     = fields[2].integer.value;
	hpp->t0->enable_perr     = fields[3].integer.value;

exit:
	kfree(buffer.pointer);
	return status;
}

/**
 * pci_get_hp_params - Get hotplug parameters from ACPI _HPX/_HPP.
 * @dev: the pci_dev for which we want parameters
 * @hpp: output structure allocated by the caller
 */
int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPX/_HPP settings apply to all child buses until another
	 * _HPX/_HPP is encountered.  If we don't find one for the given
	 * device, look in the parent device scopes, since those settings
	 * apply to this device as well.
	 */
	while (handle) {
		status = acpi_run_hpx(handle, hpp);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(handle, hpp);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_get_hp_params);

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @work: Work item to handle.
 */
static void pci_acpi_wake_bus(struct work_struct *work)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(work, struct acpi_device, wakeup.context.work);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @work: Work item to handle.
 */
static void pci_acpi_wake_dev(struct work_struct *work)
{
	struct acpi_device_wakeup_context *context;
	struct pci_dev *pci_dev;

	context = container_of(work, struct acpi_device_wakeup_context, work);
	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_runtime_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_runtime_resume(&pci_dev->dev);

	if (pci_dev->subordinate)
		pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * I.e., depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */
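
/*
 * For example, if _S3D returns 1 (D1) and wakeup from S3 is enabled, the
 * device should stay in D1; if it also has an _S3W returning 3 (D3hot),
 * anything from D1 down to D3hot is acceptable.  Without a wakeup
 * requirement, D1 or any deeper state may be used.
 */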

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0] = ACPI_STATE_D0,
		[PCI_D1] = ACPI_STATE_D1,
		[PCI_D2] = ACPI_STATE_D2,
		[PCI_D3hot] = ACPI_STATE_D3_COLD,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error = -EINVAL;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D3cold:
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL) {
			error = -EBUSY;
			break;
		}
		/* Fall through */
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		error = acpi_device_set_power(adev, state_conv[state]);
	}

	if (!error)
		dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
			 acpi_power_state_string(state_conv[state]));

	return error;
}

static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	return adev ? acpi_device_can_wakeup(adev) : false;
}

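/*
 * Propagate a wakeup enable/disable request up the PCI hierarchy: configure
 * the first ancestor bridge whose ACPI companion accepts the request,
 * falling back to the root bridge if none of the intermediate bridges does.
 */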
static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
			return;
		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		acpi_pm_device_sleep_wake(bus->bridge, enable);
}

static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	if (acpi_pci_can_wakeup(dev))
		return acpi_pm_device_sleep_wake(&dev->dev, enable);

	acpi_pci_propagate_wakeup_enable(dev->bus, enable);
	return 0;
}

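/*
 * Runtime counterpart of acpi_pci_propagate_wakeup_enable(): configure ACPI
 * run-time wakeup on the first ancestor bridge that accepts it, stopping
 * early if a bridge already signals PME through its own interrupt.
 */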
static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (bridge->pme_interrupt)
			return;
		if (!acpi_pm_device_run_wake(&bridge->dev, enable))
			return;
		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		acpi_pm_device_run_wake(bus->bridge, enable);
}

static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
{
	/*
	 * Per PCI Express Base Specification Revision 2.0 section
	 * 5.3.3.2 Link Wakeup, platform support is needed for D3cold
	 * wakeup to power on the main link, even if there is PME
	 * support for D3cold.
	 */
	if (dev->pme_interrupt && !dev->runtime_d3cold)
		return 0;

	if (!acpi_pm_device_run_wake(&dev->dev, enable))
		return 0;

	acpi_pci_propagate_run_wake(dev->bus, enable);
	return 0;
}

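/*
 * Callbacks hooked into the PCI core via pci_set_platform_pm() in
 * acpi_pci_init() below, so that PCI power management defers to ACPI where
 * the firmware provides the relevant methods.
 */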
static struct pci_platform_pm_ops acpi_pci_platform_pm = {
	.is_manageable = acpi_pci_power_manageable,
	.set_state = acpi_pci_set_power_state,
	.choose_state = acpi_pci_choose_state,
	.sleep_wake = acpi_pci_sleep_wake,
	.run_wake = acpi_pci_run_wake,
};

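/*
 * Called when a PCI bus is added or removed: enumerate (or drop) the
 * ACPI-described PCI slots and the ACPI-based hotplug (acpiphp) slots for
 * that bus, unless ACPI PCI support is disabled or the bus has no bridge
 * device.
 */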
void acpi_pci_add_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	bool check_children;
	u64 addr;

	check_children = pci_is_bridge(pci_dev);
	/*
	 * Per the ACPI spec, _ADR for a PCI device encodes the address as
	 * (device << 16) | function, e.g. 0x001F0003 for device 0x1f,
	 * function 3.
	 */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);
}

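/*
 * pci_acpi_setup()/pci_acpi_cleanup() run when the ACPI glue layer binds or
 * unbinds a PCI device and its ACPI companion: they register/unregister the
 * wakeup notifier and update the device's wakeup capabilities accordingly.
 */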
static void pci_acpi_setup(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	acpi_pci_sleep_wake(pci_dev, false);
	if (adev->wakeup.flags.run_wake)
		device_set_run_wake(dev, true);
}

static void pci_acpi_cleanup(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;

	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		device_set_wakeup_capable(dev, false);
		device_set_run_wake(dev, false);
	}
}

static bool pci_acpi_bus_match(struct device *dev)
{
	return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
	.name = "PCI",
	.match = pci_acpi_bus_match,
	.find_companion = acpi_pci_find_companion,
	.setup = pci_acpi_setup,
	.cleanup = pci_acpi_cleanup,
};

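/*
 * Honor the FADT boot flags that forbid MSI and PCIe ASPM, then register
 * the PCI/ACPI glue.  A failure to register the bus type is not fatal, but
 * it does skip the ACPI platform PM and slot setup below.
 */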
static int __init acpi_pci_init(void)
{
	int ret;

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	ret = register_acpi_bus_type(&acpi_pci_bus);
	if (ret)
		return 0;

	pci_set_platform_pm(&acpi_pci_platform_pm);
	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);