Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Makefile                    |    3
-rw-r--r--  drivers/pci/dmar.c                      |   61
-rw-r--r--  drivers/pci/hotplug/Makefile            |    2
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c        |  117
-rw-r--r--  drivers/pci/hotplug/acpiphp.h           |    3
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c      |  187
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c       |   11
-rw-r--r--  drivers/pci/hotplug/cpqphp.h            |    1
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c  |    3
-rw-r--r--  drivers/pci/hotplug/pciehp.h            |  116
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c       |   24
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c       |  136
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c       |  114
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c        |  119
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c        |  160
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c        |  187
-rw-r--r--  drivers/pci/hotplug/shpchp.h            |    9
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c        |   62
-rw-r--r--  drivers/pci/intel-iommu.c               |  416
-rw-r--r--  drivers/pci/intr_remapping.c            |    8
-rw-r--r--  drivers/pci/iova.c                      |   16
-rw-r--r--  drivers/pci/legacy.c                    |   34
-rw-r--r--  drivers/pci/msi.c                       |  283
-rw-r--r--  drivers/pci/pci-acpi.c                  |   29
-rw-r--r--  drivers/pci/pci-driver.c                |  132
-rw-r--r--  drivers/pci/pci-stub.c                  |   45
-rw-r--r--  drivers/pci/pci-sysfs.c                 |   37
-rw-r--r--  drivers/pci/pci.c                       |  125
-rw-r--r--  drivers/pci/pci.h                       |    2
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c       |   25
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c           |   27
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h           |   34
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c      |  107
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c  |  190
-rw-r--r--  drivers/pci/pcie/aspm.c                 |  492
-rw-r--r--  drivers/pci/pcie/portdrv_core.c         |    6
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c          |    4
-rw-r--r--  drivers/pci/probe.c                     |   33
-rw-r--r--  drivers/pci/quirks.c                    |   68
-rw-r--r--  drivers/pci/search.c                    |   31
-rw-r--r--  drivers/pci/setup-bus.c                 |   35
-rw-r--r--  drivers/pci/setup-res.c                 |   38
42 files changed, 1778 insertions(+), 1754 deletions(-)
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1ebd6b4c743b..4a7f11d8f432 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -8,6 +8,9 @@ obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_PCI_LEGACY) += legacy.o
+CFLAGS_legacy.o += -Wno-deprecated-declarations
+
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 7b287cb38b7a..22b02c6df854 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -33,9 +33,10 @@
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
-#undef PREFIX
-#define PREFIX "DMAR:"
+#define PREFIX "DMAR: "
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
@@ -353,6 +354,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;
+ struct acpi_dmar_rhsa *rhsa;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@@ -374,6 +376,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
atsr = container_of(header, struct acpi_dmar_atsr, header);
printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
break;
+ case ACPI_DMAR_HARDWARE_AFFINITY:
+ rhsa = container_of(header, struct acpi_dmar_rhsa, header);
+ printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+ (unsigned long long)rhsa->base_address,
+ rhsa->proximity_domain);
+ break;
}
}
@@ -413,6 +421,12 @@ parse_dmar_table(void)
*/
dmar_table_detect();
+ /*
+ * ACPI tables may not be DMA protected by tboot, so use DMAR copy
+ * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
+ */
+ dmar_tbl = tboot_get_dmar_table(dmar_tbl);
+
dmar = (struct acpi_table_dmar *)dmar_tbl;
if (!dmar)
return -ENODEV;
@@ -452,9 +466,13 @@ parse_dmar_table(void)
ret = dmar_parse_one_atsr(entry_header);
#endif
break;
+ case ACPI_DMAR_HARDWARE_AFFINITY:
+ /* We don't do anything with RHSA (yet?) */
+ break;
default:
printk(KERN_WARNING PREFIX
- "Unknown DMAR structure type\n");
+ "Unknown DMAR structure type %d\n",
+ entry_header->type);
ret = 0; /* for forward compatibility */
break;
}
@@ -570,9 +588,6 @@ int __init dmar_table_init(void)
printk(KERN_INFO PREFIX "No ATSR found\n");
#endif
-#ifdef CONFIG_INTR_REMAP
- parse_ioapics_under_ir();
-#endif
return 0;
}
@@ -632,20 +647,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+ if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+ /* Promote an attitude of violence to a BIOS engineer today */
+ WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ drhd->reg_base_addr,
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ goto err_unmap;
+ }
+
#ifdef CONFIG_DMAR
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
"Cannot get a valid agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
- goto error;
+ goto err_unmap;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
printk(KERN_ERR
"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
- goto error;
+ goto err_unmap;
}
#endif
iommu->agaw = agaw;
@@ -665,7 +691,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
}
ver = readl(iommu->reg + DMAR_VER_REG);
- pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+ pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
(unsigned long long)drhd->reg_base_addr,
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
(unsigned long long)iommu->cap,
@@ -675,7 +701,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
drhd->iommu = iommu;
return 0;
-error:
+
+ err_unmap:
+ iounmap(iommu->reg);
+ error:
kfree(iommu);
return -1;
}
@@ -1212,7 +1241,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
source_id, guest_addr);
fault_index++;
- if (fault_index > cap_num_fault_regs(iommu->cap))
+ if (fault_index >= cap_num_fault_regs(iommu->cap))
fault_index = 0;
spin_lock_irqsave(&iommu->register_lock, flag);
}
@@ -1305,3 +1334,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
return 0;
}
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+ struct acpi_table_dmar *dmar;
+ dmar = (struct acpi_table_dmar *)dmar_tbl;
+ return dmar->flags & 0x1;
+}
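
Note: the alloc_iommu() hunk above adds an err_unmap: label so the ioremap()ed register window is unmapped before the iommu structure is freed. A minimal sketch of that unwind ordering follows; it is not the kernel function itself, and example_unit/example_probe are made-up names used only for illustration.

#include <linux/io.h>
#include <linux/slab.h>

struct example_unit {
	void __iomem *reg;
};

/* Release resources in the reverse order they were acquired. */
static struct example_unit *example_probe(unsigned long base)
{
	struct example_unit *u = kzalloc(sizeof(*u), GFP_KERNEL);

	if (!u)
		return NULL;

	u->reg = ioremap(base, 4096);
	if (!u->reg)
		goto err_free;		/* nothing mapped yet */

	if (readl(u->reg) == 0xffffffff)	/* hardware not responding */
		goto err_unmap;

	return u;

err_unmap:
	iounmap(u->reg);	/* undo the mapping first ... */
err_free:
	kfree(u);		/* ... then free the allocation */
	return NULL;
}
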
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 2aa117c8cd87..3625b094bf7e 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
# Link this last so it doesn't claim devices that have a real hotplug driver
obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o
-pci_hotplug-objs := pci_hotplug_core.o
+pci_hotplug-objs := pci_hotplug_core.o pcihp_slot.o
ifdef CONFIG_HOTPLUG_PCI_CPCI
pci_hotplug-objs += cpci_hotplug_core.o \
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index eb159587d0bf..a73028ec52e5 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -41,7 +41,6 @@
#define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
#define METHOD_NAME__SUN "_SUN"
-#define METHOD_NAME__HPP "_HPP"
#define METHOD_NAME_OSHP "OSHP"
static int debug_acpi;
@@ -215,80 +214,41 @@ acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
static acpi_status
acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
{
- acpi_status status;
- u8 nui[4];
- struct acpi_buffer ret_buf = { 0, NULL};
- struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *ext_obj, *package;
- int i, len = 0;
-
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *package, *fields;
+ int i;
- /* Clear the return buffer with zeros */
memset(hpp, 0, sizeof(struct hotplug_params));
- /* get _hpp */
- status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf);
- switch (status) {
- case AE_BUFFER_OVERFLOW:
- ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL);
- if (!ret_buf.pointer) {
- printk(KERN_ERR "%s:%s alloc for _HPP fail\n",
- __func__, (char *)string.pointer);
- kfree(string.pointer);
- return AE_NO_MEMORY;
- }
- status = acpi_evaluate_object(handle, METHOD_NAME__HPP,
- NULL, &ret_buf);
- if (ACPI_SUCCESS(status))
- break;
- default:
- if (ACPI_FAILURE(status)) {
- pr_debug("%s:%s _HPP fail=0x%x\n", __func__,
- (char *)string.pointer, status);
- kfree(string.pointer);
- return status;
- }
- }
+ status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
- ext_obj = (union acpi_object *) ret_buf.pointer;
- if (ext_obj->type != ACPI_TYPE_PACKAGE) {
- printk(KERN_ERR "%s:%s _HPP obj not a package\n", __func__,
- (char *)string.pointer);
+ package = (union acpi_object *) buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE ||
+ package->package.count != 4) {
status = AE_ERROR;
- goto free_and_return;
+ goto exit;
}
- len = ext_obj->package.count;
- package = (union acpi_object *) ret_buf.pointer;
- for ( i = 0; (i < len) || (i < 4); i++) {
- ext_obj = (union acpi_object *) &package->package.elements[i];
- switch (ext_obj->type) {
- case ACPI_TYPE_INTEGER:
- nui[i] = (u8)ext_obj->integer.value;
- break;
- default:
- printk(KERN_ERR "%s:%s _HPP obj type incorrect\n",
- __func__, (char *)string.pointer);
+ fields = package->package.elements;
+ for (i = 0; i < 4; i++) {
+ if (fields[i].type != ACPI_TYPE_INTEGER) {
status = AE_ERROR;
- goto free_and_return;
+ goto exit;
}
}
hpp->t0 = &hpp->type0_data;
- hpp->t0->cache_line_size = nui[0];
- hpp->t0->latency_timer = nui[1];
- hpp->t0->enable_serr = nui[2];
- hpp->t0->enable_perr = nui[3];
-
- pr_debug(" _HPP: cache_line_size=0x%x\n", hpp->t0->cache_line_size);
- pr_debug(" _HPP: latency timer =0x%x\n", hpp->t0->latency_timer);
- pr_debug(" _HPP: enable SERR =0x%x\n", hpp->t0->enable_serr);
- pr_debug(" _HPP: enable PERR =0x%x\n", hpp->t0->enable_perr);
+ hpp->t0->revision = 1;
+ hpp->t0->cache_line_size = fields[0].integer.value;
+ hpp->t0->latency_timer = fields[1].integer.value;
+ hpp->t0->enable_serr = fields[2].integer.value;
+ hpp->t0->enable_perr = fields[3].integer.value;
-free_and_return:
- kfree(string.pointer);
- kfree(ret_buf.pointer);
+exit:
+ kfree(buffer.pointer);
return status;
}
@@ -322,20 +282,19 @@ static acpi_status acpi_run_oshp(acpi_handle handle)
return status;
}
-/* acpi_get_hp_params_from_firmware
+/* pci_get_hp_params
*
- * @bus - the pci_bus of the bus on which the device is newly added
+ * @dev - the pci_dev for which we want parameters
* @hpp - allocated by the caller
*/
-acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
- struct hotplug_params *hpp)
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
{
- acpi_status status = AE_NOT_FOUND;
+ acpi_status status;
acpi_handle handle, phandle;
struct pci_bus *pbus;
handle = NULL;
- for (pbus = bus; pbus; pbus = pbus->parent) {
+ for (pbus = dev->bus; pbus; pbus = pbus->parent) {
handle = acpi_pci_get_bridge_handle(pbus);
if (handle)
break;
@@ -345,15 +304,15 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
* _HPP settings apply to all child buses, until another _HPP is
* encountered. If we don't find an _HPP for the input pci dev,
* look for it in the parent device scope since that would apply to
- * this pci dev. If we don't find any _HPP, use hardcoded defaults
+ * this pci dev.
*/
while (handle) {
status = acpi_run_hpx(handle, hpp);
if (ACPI_SUCCESS(status))
- break;
+ return 0;
status = acpi_run_hpp(handle, hpp);
if (ACPI_SUCCESS(status))
- break;
+ return 0;
if (acpi_is_root_bridge(handle))
break;
status = acpi_get_parent(handle, &phandle);
@@ -361,9 +320,9 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
break;
handle = phandle;
}
- return status;
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
+EXPORT_SYMBOL_GPL(pci_get_hp_params);
/**
* acpi_get_hp_hw_control_from_firmware
@@ -500,18 +459,18 @@ check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
/**
* acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
- * @pbus - PCI bus to scan
+ * @handle - handle of the PCI bus to scan
*
* Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
*/
-int acpi_pci_detect_ejectable(struct pci_bus *pbus)
+int acpi_pci_detect_ejectable(acpi_handle handle)
{
- acpi_handle handle;
int found = 0;
- if (!(handle = acpi_pci_get_bridge_handle(pbus)))
- return 0;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
+ if (!handle)
+ return found;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
check_hotplug, (void *)&found, NULL);
return found;
}
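
Note: acpi_get_hp_params_from_firmware() becomes pci_get_hp_params() above, taking the pci_dev directly and returning 0 or -ENODEV instead of an acpi_status. A hedged usage sketch of the new interface; the helper name example_apply_hpp and the abbreviated error handling are mine, not part of the patch.

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static void example_apply_hpp(struct pci_dev *dev)
{
	struct hotplug_params hpp;

	if (pci_get_hp_params(dev, &hpp))	/* -ENODEV: no _HPX/_HPP found */
		return;

	if (hpp.t0 && hpp.t0->revision <= 1) {
		/* Type 0 settings: conventional PCI header values */
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
				      hpp.t0->cache_line_size);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER,
				      hpp.t0->latency_timer);
	}
}
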
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e68d5f20ffb3..7d938df79206 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -91,9 +91,6 @@ struct acpiphp_bridge {
/* PCI-to-PCI bridge device */
struct pci_dev *pci_dev;
- /* ACPI 2.0 _HPP parameters */
- struct hotplug_params hpp;
-
spinlock_t res_lock;
};
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0cb0f830a993..58d25a163a8b 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(ioapic_list_lock);
static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
-static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
+static void acpiphp_set_hpp_values(struct pci_bus *bus);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
/* callback routine to check for the existence of a pci dock device */
@@ -261,51 +261,21 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
/* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(struct pci_bus *pbus)
+static int detect_ejectable_slots(acpi_handle handle)
{
- int found = acpi_pci_detect_ejectable(pbus);
+ int found = acpi_pci_detect_ejectable(handle);
if (!found) {
- acpi_handle bridge_handle = acpi_pci_get_bridge_handle(pbus);
- if (!bridge_handle)
- return 0;
- acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge_handle, (u32)1,
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
is_pci_dock_device, (void *)&found, NULL);
}
return found;
}
-
-/* decode ACPI 2.0 _HPP hot plug parameters */
-static void decode_hpp(struct acpiphp_bridge *bridge)
-{
- acpi_status status;
-
- status = acpi_get_hp_params_from_firmware(bridge->pci_bus, &bridge->hpp);
- if (ACPI_FAILURE(status) ||
- !bridge->hpp.t0 || (bridge->hpp.t0->revision > 1)) {
- /* use default numbers */
- printk(KERN_WARNING
- "%s: Could not get hotplug parameters. Use defaults\n",
- __func__);
- bridge->hpp.t0 = &bridge->hpp.type0_data;
- bridge->hpp.t0->revision = 0;
- bridge->hpp.t0->cache_line_size = 0x10;
- bridge->hpp.t0->latency_timer = 0x40;
- bridge->hpp.t0->enable_serr = 0;
- bridge->hpp.t0->enable_perr = 0;
- }
-}
-
-
-
/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
static void init_bridge_misc(struct acpiphp_bridge *bridge)
{
acpi_status status;
- /* decode ACPI 2.0 _HPP (hot plug parameters) */
- decode_hpp(bridge);
-
/* must be added to the list prior to calling register_slot */
list_add(&bridge->list, &bridge_list);
@@ -399,9 +369,10 @@ static inline void config_p2p_bridge_flags(struct acpiphp_bridge *bridge)
/* allocate and initialize host bridge data structure */
-static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
+static void add_host_bridge(acpi_handle *handle)
{
struct acpiphp_bridge *bridge;
+ struct acpi_pci_root *root = acpi_pci_find_root(handle);
bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
if (bridge == NULL)
@@ -410,7 +381,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
bridge->type = BRIDGE_TYPE_HOST;
bridge->handle = handle;
- bridge->pci_bus = pci_bus;
+ bridge->pci_bus = root->bus;
spin_lock_init(&bridge->res_lock);
@@ -419,7 +390,7 @@ static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
/* allocate and initialize PCI-to-PCI bridge data structure */
-static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
+static void add_p2p_bridge(acpi_handle *handle)
{
struct acpiphp_bridge *bridge;
@@ -433,8 +404,8 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
bridge->handle = handle;
config_p2p_bridge_flags(bridge);
- bridge->pci_dev = pci_dev_get(pci_dev);
- bridge->pci_bus = pci_dev->subordinate;
+ bridge->pci_dev = acpi_get_pci_dev(handle);
+ bridge->pci_bus = bridge->pci_dev->subordinate;
if (!bridge->pci_bus) {
err("This is not a PCI-to-PCI bridge!\n");
goto err;
@@ -451,7 +422,7 @@ static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
init_bridge_misc(bridge);
return;
err:
- pci_dev_put(pci_dev);
+ pci_dev_put(bridge->pci_dev);
kfree(bridge);
return;
}
@@ -462,39 +433,21 @@ static acpi_status
find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
{
acpi_status status;
- acpi_handle dummy_handle;
- unsigned long long tmp;
- int device, function;
struct pci_dev *dev;
- struct pci_bus *pci_bus = context;
-
- status = acpi_get_handle(handle, "_ADR", &dummy_handle);
- if (ACPI_FAILURE(status))
- return AE_OK; /* continue */
-
- status = acpi_evaluate_integer(handle, "_ADR", NULL, &tmp);
- if (ACPI_FAILURE(status)) {
- dbg("%s: _ADR evaluation failure\n", __func__);
- return AE_OK;
- }
-
- device = (tmp >> 16) & 0xffff;
- function = tmp & 0xffff;
-
- dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
+ dev = acpi_get_pci_dev(handle);
if (!dev || !dev->subordinate)
goto out;
/* check if this bridge has ejectable slots */
- if ((detect_ejectable_slots(dev->subordinate) > 0)) {
+ if ((detect_ejectable_slots(handle) > 0)) {
dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
- add_p2p_bridge(handle, dev);
+ add_p2p_bridge(handle);
}
/* search P2P bridges under this p2p bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, dev->subordinate, NULL);
+ find_p2p_bridge, NULL, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
@@ -509,9 +462,7 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
- int seg, bus;
acpi_handle dummy_handle;
- struct pci_bus *pci_bus;
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
@@ -526,36 +477,15 @@ static int add_bridge(acpi_handle handle)
return 0;
}
- /* get PCI segment number */
- status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
-
- seg = ACPI_SUCCESS(status) ? tmp : 0;
-
- /* get PCI bus number */
- status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
-
- if (ACPI_SUCCESS(status)) {
- bus = tmp;
- } else {
- warn("can't get bus number, assuming 0\n");
- bus = 0;
- }
-
- pci_bus = pci_find_bus(seg, bus);
- if (!pci_bus) {
- err("Can't find bus %04x:%02x\n", seg, bus);
- return 0;
- }
-
/* check if this bridge has ejectable slots */
- if (detect_ejectable_slots(pci_bus) > 0) {
+ if (detect_ejectable_slots(handle) > 0) {
dbg("found PCI host-bus bridge with hot-pluggable slots\n");
- add_host_bridge(handle, pci_bus);
+ add_host_bridge(handle);
}
/* search P2P bridges under this host bridge */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- find_p2p_bridge, pci_bus, NULL);
+ find_p2p_bridge, NULL, NULL);
if (ACPI_FAILURE(status))
warn("find_p2p_bridge failed (error code = 0x%x)\n", status);
@@ -1083,7 +1013,7 @@ static int __ref enable_device(struct acpiphp_slot *slot)
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(slot->bridge->handle, bus);
+ acpiphp_set_hpp_values(bus);
list_for_each_entry(func, &slot->funcs, sibling)
acpiphp_configure_ioapics(func->handle);
pci_enable_bridges(bus);
@@ -1294,70 +1224,12 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
return retval;
}
-static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge)
+static void acpiphp_set_hpp_values(struct pci_bus *bus)
{
- u16 pci_cmd, pci_bctl;
- struct pci_dev *cdev;
-
- /* Program hpp values for this device */
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
- return;
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
- bridge->hpp.t0->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER,
- bridge->hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (bridge->hpp.t0->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (bridge->hpp.t0->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value and child devices */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- bridge->hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (bridge->hpp.t0->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (bridge->hpp.t0->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- program_hpp(cdev, bridge);
- }
- }
-}
-
-static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus)
-{
- struct acpiphp_bridge bridge;
struct pci_dev *dev;
- memset(&bridge, 0, sizeof(bridge));
- bridge.handle = handle;
- bridge.pci_bus = bus;
- bridge.pci_dev = bus->self;
- decode_hpp(&bridge);
list_for_each_entry(dev, &bus->devices, bus_list)
- program_hpp(dev, &bridge);
-
+ pci_configure_slot(dev);
}
/*
@@ -1387,24 +1259,23 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
/* Program resources in newly inserted bridge */
static int acpiphp_configure_bridge (acpi_handle handle)
{
- struct pci_dev *dev;
struct pci_bus *bus;
- dev = acpi_get_pci_dev(handle);
- if (!dev) {
- err("cannot get PCI domain and bus number for bridge\n");
- return -EINVAL;
+ if (acpi_is_root_bridge(handle)) {
+ struct acpi_pci_root *root = acpi_pci_find_root(handle);
+ bus = root->bus;
+ } else {
+ struct pci_dev *pdev = acpi_get_pci_dev(handle);
+ bus = pdev->subordinate;
+ pci_dev_put(pdev);
}
- bus = dev->bus;
-
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
acpiphp_sanitize_bus(bus);
- acpiphp_set_hpp_values(handle, bus);
+ acpiphp_set_hpp_values(bus);
pci_enable_bridges(bus);
acpiphp_configure_ioapics(handle);
- pci_dev_put(dev);
return 0;
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 5befa7e379b7..e7be66dbac21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -398,23 +398,20 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
acpi_handle *phandle = (acpi_handle *)context;
acpi_status status;
struct acpi_device_info *info;
- struct acpi_buffer info_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
int retval = 0;
- status = acpi_get_object_info(handle, &info_buffer);
+ status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status)) {
err("%s: Failed to get device information status=0x%x\n",
__func__, status);
return retval;
}
- info = info_buffer.pointer;
- info->hardware_id.value[sizeof(info->hardware_id.value) - 1] = '\0';
if (info->current_status && (info->valid & ACPI_VALID_HID) &&
- (!strcmp(info->hardware_id.value, IBM_HARDWARE_ID1) ||
- !strcmp(info->hardware_id.value, IBM_HARDWARE_ID2))) {
+ (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
+ !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
dbg("found hardware: %s, handle: %p\n",
- info->hardware_id.value, handle);
+ info->hardware_id.string, handle);
*phandle = handle;
/* returning non-zero causes the search to stop
* and returns this value to the caller of
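
Note: the acpiphp_ibm.c hunk above follows the ACPICA change where acpi_get_object_info() returns a struct acpi_device_info pointer (with hardware_id.string) rather than filling an acpi_buffer. A minimal sketch of the new calling convention, assuming the usual pattern of the caller freeing the info structure when done:

struct acpi_device_info *info;
acpi_status status;

status = acpi_get_object_info(handle, &info);
if (ACPI_SUCCESS(status)) {
	if ((info->valid & ACPI_VALID_HID) && info->hardware_id.string)
		pr_debug("HID: %s\n", info->hardware_id.string);
	kfree(info);	/* info is allocated by ACPICA on our behalf */
}
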
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 53836001d511..9c6a9fd26812 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -32,6 +32,7 @@
#include <asm/io.h> /* for read? and write? functions */
#include <linux/delay.h> /* for delays */
#include <linux/mutex.h>
+#include <linux/sched.h> /* for signal_pending() */
#define MY_NAME "cpqphp"
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5c5043f239cf..0325d989bb46 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -86,7 +86,8 @@ static char *pci_bus_speed_strings[] = {
"66 MHz PCIX 533", /* 0x11 */
"100 MHz PCIX 533", /* 0x12 */
"133 MHz PCIX 533", /* 0x13 */
- "25 GBps PCI-E", /* 0x14 */
+ "2.5 GT/s PCI-E", /* 0x14 */
+ "5.0 GT/s PCI-E", /* 0x15 */
};
#ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e6cf096498be..3070f77eb56a 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -72,15 +72,9 @@ do { \
#define SLOT_NAME_SIZE 10
struct slot {
- u8 bus;
- u8 device;
u8 state;
- u8 hp_slot;
- u32 number;
struct controller *ctrl;
- struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
- struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -92,18 +86,10 @@ struct event_info {
};
struct controller {
- struct mutex crit_sect; /* critical section mutex */
struct mutex ctrl_lock; /* controller lock */
- int num_slots; /* Number of slots on ctlr */
- int slot_num_inc; /* 1 or -1 */
- struct pci_dev *pci_dev;
struct pcie_device *pcie; /* PCI Express port service */
- struct list_head slot_list;
- struct hpc_ops *hpc_ops;
+ struct slot *slot;
wait_queue_head_t queue; /* sleep & wake process */
- u8 slot_device_offset;
- u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */
- u8 slot_bus; /* Bus where the slots handled by this controller sit */
u32 slot_cap;
u8 cap_base;
struct timer_list poll_timer;
@@ -131,40 +117,20 @@ struct controller {
#define POWERON_STATE 3
#define POWEROFF_STATE 4
-/* Error messages */
-#define INTERLOCK_OPEN 0x00000002
-#define ADD_NOT_SUPPORTED 0x00000003
-#define CARD_FUNCTIONING 0x00000005
-#define ADAPTER_NOT_SAME 0x00000006
-#define NO_ADAPTER_PRESENT 0x00000009
-#define NOT_ENOUGH_RESOURCES 0x0000000B
-#define DEVICE_TYPE_NOT_SUPPORTED 0x0000000C
-#define WRONG_BUS_FREQUENCY 0x0000000D
-#define POWER_FAILURE 0x0000000E
-
-/* Field definitions in Slot Capabilities Register */
-#define ATTN_BUTTN_PRSN 0x00000001
-#define PWR_CTRL_PRSN 0x00000002
-#define MRL_SENS_PRSN 0x00000004
-#define ATTN_LED_PRSN 0x00000008
-#define PWR_LED_PRSN 0x00000010
-#define HP_SUPR_RM_SUP 0x00000020
-#define EMI_PRSN 0x00020000
-#define NO_CMD_CMPL_SUP 0x00040000
-
-#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & ATTN_BUTTN_PRSN)
-#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PWR_CTRL_PRSN)
-#define MRL_SENS(ctrl) ((ctrl)->slot_cap & MRL_SENS_PRSN)
-#define ATTN_LED(ctrl) ((ctrl)->slot_cap & ATTN_LED_PRSN)
-#define PWR_LED(ctrl) ((ctrl)->slot_cap & PWR_LED_PRSN)
-#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & HP_SUPR_RM_SUP)
-#define EMI(ctrl) ((ctrl)->slot_cap & EMI_PRSN)
-#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & NO_CMD_CMPL_SUP)
+#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
+#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
+#define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP)
+#define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP)
+#define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP)
+#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
+#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
+#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
+#define PSN(ctrl) ((ctrl)->slot_cap >> 19)
extern int pciehp_sysfs_enable_slot(struct slot *slot);
extern int pciehp_sysfs_disable_slot(struct slot *slot);
extern u8 pciehp_handle_attention_button(struct slot *p_slot);
- extern u8 pciehp_handle_switch_change(struct slot *p_slot);
+extern u8 pciehp_handle_switch_change(struct slot *p_slot);
extern u8 pciehp_handle_presence_change(struct slot *p_slot);
extern u8 pciehp_handle_power_fault(struct slot *p_slot);
extern int pciehp_configure_device(struct slot *p_slot);
@@ -175,45 +141,30 @@ int pcie_init_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
int pcie_enable_notification(struct controller *ctrl);
+int pciehp_power_on_slot(struct slot *slot);
+int pciehp_power_off_slot(struct slot *slot);
+int pciehp_get_power_status(struct slot *slot, u8 *status);
+int pciehp_get_attention_status(struct slot *slot, u8 *status);
+
+int pciehp_set_attention_status(struct slot *slot, u8 status);
+int pciehp_get_latch_status(struct slot *slot, u8 *status);
+int pciehp_get_adapter_status(struct slot *slot, u8 *status);
+int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed);
+int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val);
+int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed);
+int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val);
+int pciehp_query_power_fault(struct slot *slot);
+void pciehp_green_led_on(struct slot *slot);
+void pciehp_green_led_off(struct slot *slot);
+void pciehp_green_led_blink(struct slot *slot);
+int pciehp_check_link_status(struct controller *ctrl);
+void pciehp_release_ctrl(struct controller *ctrl);
static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
-static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
-{
- struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (slot->device == device)
- return slot;
- }
-
- ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
- return NULL;
-}
-
-struct hpc_ops {
- int (*power_on_slot)(struct slot *slot);
- int (*power_off_slot)(struct slot *slot);
- int (*get_power_status)(struct slot *slot, u8 *status);
- int (*get_attention_status)(struct slot *slot, u8 *status);
- int (*set_attention_status)(struct slot *slot, u8 status);
- int (*get_latch_status)(struct slot *slot, u8 *status);
- int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
- int (*get_cur_lnk_width)(struct slot *slot, enum pcie_link_width *val);
- int (*query_power_fault)(struct slot *slot);
- void (*green_led_on)(struct slot *slot);
- void (*green_led_off)(struct slot *slot);
- void (*green_led_blink)(struct slot *slot);
- void (*release_ctlr)(struct controller *ctrl);
- int (*check_lnk_status)(struct controller *ctrl);
-};
-
#ifdef CONFIG_ACPI
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
@@ -237,17 +188,8 @@ static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
return retval;
return pciehp_acpi_slot_detection_check(dev);
}
-
-static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
- struct hotplug_params *hpp)
-{
- if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp)))
- return -ENODEV;
- return 0;
-}
#else
#define pciehp_firmware_init() do {} while (0)
#define pciehp_get_hp_hw_control_from_firmware(dev) 0
-#define pciehp_get_hp_params_from_firmware(dev, hpp) (-ENODEV)
#endif /* CONFIG_ACPI */
#endif /* _PCIEHP_H */
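
Note: the slot-capability macros above now build on the generic PCI_EXP_SLTCAP_* bit definitions, and the new PSN(ctrl) macro pulls the Physical Slot Number out of bits 31:19 of the Slot Capabilities register. An illustrative sketch of that decode; the helper name and the message text are hypothetical, not taken from the patch.

#include <linux/pci.h>

static void example_dump_slot_cap(struct pci_dev *port)
{
	int cap = pci_find_capability(port, PCI_CAP_ID_EXP);
	u32 slot_cap;

	if (!cap)
		return;

	pci_read_config_dword(port, cap + PCI_EXP_SLTCAP, &slot_cap);

	dev_info(&port->dev, "physical slot #%u%s%s\n",
		 slot_cap >> 19,	/* PSN lives in bits 31:19 */
		 (slot_cap & PCI_EXP_SLTCAP_ABP) ? ", attention button" : "",
		 (slot_cap & PCI_EXP_SLTCAP_PCP) ? ", power controller" : "");
}
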
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 96048010e7d9..37c8d3d0323e 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -33,6 +33,11 @@
#define PCIEHP_DETECT_AUTO (2)
#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO
+struct dummy_slot {
+ u32 number;
+ struct list_head list;
+};
+
static int slot_detection_mode;
static char *pciehp_detect_mode;
module_param(pciehp_detect_mode, charp, 0444);
@@ -47,7 +52,7 @@ int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
{
if (slot_detection_mode != PCIEHP_DETECT_ACPI)
return 0;
- if (acpi_pci_detect_ejectable(dev->subordinate))
+ if (acpi_pci_detect_ejectable(DEVICE_ACPI_HANDLE(&dev->dev)))
return 0;
return -ENODEV;
}
@@ -76,9 +81,9 @@ static int __init dummy_probe(struct pcie_device *dev)
{
int pos;
u32 slot_cap;
- struct slot *slot, *tmp;
+ acpi_handle handle;
+ struct dummy_slot *slot, *tmp;
struct pci_dev *pdev = dev->port;
- struct pci_bus *pbus = pdev->subordinate;
/* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
if (pciehp_get_hp_hw_control_from_firmware(pdev))
return -ENODEV;
@@ -89,12 +94,13 @@ static int __init dummy_probe(struct pcie_device *dev)
if (!slot)
return -ENOMEM;
slot->number = slot_cap >> 19;
- list_for_each_entry(tmp, &dummy_slots, slot_list) {
+ list_for_each_entry(tmp, &dummy_slots, list) {
if (tmp->number == slot->number)
dup_slot_id++;
}
- list_add_tail(&slot->slot_list, &dummy_slots);
- if (!acpi_slot_detected && acpi_pci_detect_ejectable(pbus))
+ list_add_tail(&slot->list, &dummy_slots);
+ handle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
acpi_slot_detected = 1;
return -ENODEV; /* dummy driver always returns error */
}
@@ -108,11 +114,11 @@ static struct pcie_port_service_driver __initdata dummy_driver = {
static int __init select_detection_mode(void)
{
- struct slot *slot, *tmp;
+ struct dummy_slot *slot, *tmp;
pcie_port_service_register(&dummy_driver);
pcie_port_service_unregister(&dummy_driver);
- list_for_each_entry_safe(slot, tmp, &dummy_slots, slot_list) {
- list_del(&slot->slot_list);
+ list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
+ list_del(&slot->list);
kfree(slot);
}
if (acpi_slot_detected && dup_slot_id)
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 2317557fdee6..bc234719b1df 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -99,65 +99,55 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(hotplug_slot);
}
-static int init_slots(struct controller *ctrl)
+static int init_slot(struct controller *ctrl)
{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
+ struct slot *slot = ctrl->slot;
+ struct hotplug_slot *hotplug = NULL;
+ struct hotplug_slot_info *info = NULL;
char name[SLOT_NAME_SIZE];
int retval = -ENOMEM;
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
- goto error;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto error_hpslot;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->info = info;
- hotplug_slot->private = slot;
- hotplug_slot->release = &release_slot;
- hotplug_slot->ops = &pciehp_hotplug_slot_ops;
- slot->hotplug_slot = hotplug_slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
-
- ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x "
- "hp_slot=%x sun=%x slot_device_offset=%x\n",
- pci_domain_nr(ctrl->pci_dev->subordinate),
- slot->bus, slot->device, slot->hp_slot, slot->number,
- ctrl->slot_device_offset);
- retval = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->subordinate,
- slot->device,
- name);
- if (retval) {
- ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
- retval);
- goto error_info;
- }
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
+ if (!hotplug)
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto out;
+
+ /* register this slot with the hotplug pci core */
+ hotplug->info = info;
+ hotplug->private = slot;
+ hotplug->release = &release_slot;
+ hotplug->ops = &pciehp_hotplug_slot_ops;
+ slot->hotplug_slot = hotplug;
+ snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
+
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n",
+ pci_domain_nr(ctrl->pcie->port->subordinate),
+ ctrl->pcie->port->subordinate->number, PSN(ctrl));
+ retval = pci_hp_register(hotplug,
+ ctrl->pcie->port->subordinate, 0, name);
+ if (retval) {
+ ctrl_err(ctrl,
+ "pci_hp_register failed with error %d\n", retval);
+ goto out;
+ }
+ get_power_status(hotplug, &info->power_status);
+ get_attention_status(hotplug, &info->attention_status);
+ get_latch_status(hotplug, &info->latch_status);
+ get_adapter_status(hotplug, &info->adapter_status);
+out:
+ if (retval) {
+ kfree(info);
+ kfree(hotplug);
}
-
- return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
-error:
return retval;
}
-static void cleanup_slots(struct controller *ctrl)
+static void cleanup_slot(struct controller *ctrl)
{
- struct slot *slot;
- list_for_each_entry(slot, &ctrl->slot_list, slot_list)
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(ctrl->slot->hotplug_slot);
}
/*
@@ -173,7 +163,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
hotplug_slot->info->attention_status = status;
if (ATTN_LED(slot->ctrl))
- slot->hpc_ops->set_attention_status(slot, status);
+ pciehp_set_attention_status(slot, status);
return 0;
}
@@ -208,7 +198,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_power_status(slot, value);
+ retval = pciehp_get_power_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->power_status;
@@ -223,7 +213,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_attention_status(slot, value);
+ retval = pciehp_get_attention_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->attention_status;
@@ -238,7 +228,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_latch_status(slot, value);
+ retval = pciehp_get_latch_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->latch_status;
@@ -253,7 +243,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_adapter_status(slot, value);
+ retval = pciehp_get_adapter_status(slot, value);
if (retval < 0)
*value = hotplug_slot->info->adapter_status;
@@ -269,7 +259,7 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_max_bus_speed(slot, value);
+ retval = pciehp_get_max_link_speed(slot, value);
if (retval < 0)
*value = PCI_SPEED_UNKNOWN;
@@ -284,7 +274,7 @@ static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
+ retval = pciehp_get_cur_link_speed(slot, value);
if (retval < 0)
*value = PCI_SPEED_UNKNOWN;
@@ -295,7 +285,7 @@ static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
- struct slot *t_slot;
+ struct slot *slot;
u8 value;
struct pci_dev *pdev = dev->port;
@@ -314,7 +304,7 @@ static int pciehp_probe(struct pcie_device *dev)
set_service_data(dev, ctrl);
/* Setup the slot information structures */
- rc = init_slots(ctrl);
+ rc = init_slot(ctrl);
if (rc) {
if (rc == -EBUSY)
ctrl_warn(ctrl, "Slot already registered by another "
@@ -332,15 +322,15 @@ static int pciehp_probe(struct pcie_device *dev)
}
/* Check if slot is occupied */
- t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
- t_slot->hpc_ops->get_adapter_status(t_slot, &value);
+ slot = ctrl->slot;
+ pciehp_get_adapter_status(slot, &value);
if (value) {
if (pciehp_force)
- pciehp_enable_slot(t_slot);
+ pciehp_enable_slot(slot);
} else {
/* Power off slot if not occupied */
if (POWER_CTRL(ctrl)) {
- rc = t_slot->hpc_ops->power_off_slot(t_slot);
+ rc = pciehp_power_off_slot(slot);
if (rc)
goto err_out_free_ctrl_slot;
}
@@ -349,19 +339,19 @@ static int pciehp_probe(struct pcie_device *dev)
return 0;
err_out_free_ctrl_slot:
- cleanup_slots(ctrl);
+ cleanup_slot(ctrl);
err_out_release_ctlr:
- ctrl->hpc_ops->release_ctlr(ctrl);
+ pciehp_release_ctrl(ctrl);
err_out_none:
return -ENODEV;
}
-static void pciehp_remove (struct pcie_device *dev)
+static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- cleanup_slots(ctrl);
- ctrl->hpc_ops->release_ctlr(ctrl);
+ cleanup_slot(ctrl);
+ pciehp_release_ctrl(ctrl);
}
#ifdef CONFIG_PM
@@ -376,20 +366,20 @@ static int pciehp_resume (struct pcie_device *dev)
dev_info(&dev->device, "%s ENTRY\n", __func__);
if (pciehp_force) {
struct controller *ctrl = get_service_data(dev);
- struct slot *t_slot;
+ struct slot *slot;
u8 status;
/* reinitialize the chipset's event detection logic */
pcie_enable_notification(ctrl);
- t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
+ slot = ctrl->slot;
/* Check if slot is occupied */
- t_slot->hpc_ops->get_adapter_status(t_slot, &status);
+ pciehp_get_adapter_status(slot, &status);
if (status)
- pciehp_enable_slot(t_slot);
+ pciehp_enable_slot(slot);
else
- pciehp_disable_slot(t_slot);
+ pciehp_disable_slot(slot);
}
return 0;
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 8aab8edf123e..84487d126e4d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -82,7 +82,7 @@ u8 pciehp_handle_switch_change(struct slot *p_slot)
/* Switch Change */
ctrl_dbg(ctrl, "Switch interrupt received\n");
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ pciehp_get_latch_status(p_slot, &getstatus);
if (getstatus) {
/*
* Switch opened
@@ -114,7 +114,7 @@ u8 pciehp_handle_presence_change(struct slot *p_slot)
/* Switch is open, assume a presence change
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &presence_save);
+ pciehp_get_adapter_status(p_slot, &presence_save);
if (presence_save) {
/*
* Card Present
@@ -143,7 +143,7 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
/* power fault */
ctrl_dbg(ctrl, "Power fault interrupt received\n");
- if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!pciehp_query_power_fault(p_slot)) {
/*
* power fault Cleared
*/
@@ -172,7 +172,7 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
- if (pslot->hpc_ops->power_off_slot(pslot)) {
+ if (pciehp_power_off_slot(pslot)) {
ctrl_err(ctrl,
"Issue of Slot Power Off command failed\n");
return;
@@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
}
if (PWR_LED(ctrl))
- pslot->hpc_ops->green_led_off(pslot);
+ pciehp_green_led_off(pslot);
if (ATTN_LED(ctrl)) {
- if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
+ if (pciehp_set_attention_status(pslot, 1)) {
ctrl_err(ctrl,
"Issue of Set Attention Led command failed\n");
return;
@@ -208,24 +208,20 @@ static int board_added(struct slot *p_slot)
{
int retval = 0;
struct controller *ctrl = p_slot->ctrl;
- struct pci_bus *parent = ctrl->pci_dev->subordinate;
-
- ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n",
- __func__, p_slot->device, ctrl->slot_device_offset,
- p_slot->hp_slot);
+ struct pci_bus *parent = ctrl->pcie->port->subordinate;
if (POWER_CTRL(ctrl)) {
/* Power on slot */
- retval = p_slot->hpc_ops->power_on_slot(p_slot);
+ retval = pciehp_power_on_slot(p_slot);
if (retval)
return retval;
}
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_blink(p_slot);
+ pciehp_green_led_blink(p_slot);
/* Check link training status */
- retval = p_slot->hpc_ops->check_lnk_status(ctrl);
+ retval = pciehp_check_link_status(ctrl);
if (retval) {
ctrl_err(ctrl, "Failed to check link status\n");
set_slot_off(ctrl, p_slot);
@@ -233,26 +229,21 @@ static int board_added(struct slot *p_slot)
}
/* Check for a power fault */
- if (p_slot->hpc_ops->query_power_fault(p_slot)) {
+ if (pciehp_query_power_fault(p_slot)) {
ctrl_dbg(ctrl, "Power fault detected\n");
- retval = POWER_FAILURE;
+ retval = -EIO;
goto err_exit;
}
retval = pciehp_configure_device(p_slot);
if (retval) {
- ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
- pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
+ pci_domain_nr(parent), parent->number);
goto err_exit;
}
- /*
- * Some PCI Express root ports require fixup after hot-plug operation.
- */
- if (pcie_mch_quirk)
- pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_on(p_slot);
+ pciehp_green_led_on(p_slot);
return 0;
@@ -274,11 +265,9 @@ static int remove_board(struct slot *p_slot)
if (retval)
return retval;
- ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot);
-
if (POWER_CTRL(ctrl)) {
/* power off slot */
- retval = p_slot->hpc_ops->power_off_slot(p_slot);
+ retval = pciehp_power_off_slot(p_slot);
if (retval) {
ctrl_err(ctrl,
"Issue of Slot Disable command failed\n");
@@ -292,9 +281,9 @@ static int remove_board(struct slot *p_slot)
msleep(1000);
}
+ /* turn off Green LED */
if (PWR_LED(ctrl))
- /* turn off Green LED */
- p_slot->hpc_ops->green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
return 0;
}
@@ -322,18 +311,17 @@ static void pciehp_power_thread(struct work_struct *work)
case POWEROFF_STATE:
mutex_unlock(&p_slot->lock);
ctrl_dbg(p_slot->ctrl,
- "Disabling domain:bus:device=%04x:%02x:%02x\n",
- pci_domain_nr(p_slot->ctrl->pci_dev->subordinate),
- p_slot->bus, p_slot->device);
+ "Disabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
pciehp_disable_slot(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
- if (pciehp_enable_slot(p_slot) &&
- PWR_LED(p_slot->ctrl))
- p_slot->hpc_ops->green_led_off(p_slot);
+ if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl))
+ pciehp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
@@ -384,10 +372,10 @@ static int update_slot_info(struct slot *slot)
if (!info)
return -ENOMEM;
- slot->hpc_ops->get_power_status(slot, &(info->power_status));
- slot->hpc_ops->get_attention_status(slot, &(info->attention_status));
- slot->hpc_ops->get_latch_status(slot, &(info->latch_status));
- slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status));
+ pciehp_get_power_status(slot, &info->power_status);
+ pciehp_get_attention_status(slot, &info->attention_status);
+ pciehp_get_latch_status(slot, &info->latch_status);
+ pciehp_get_adapter_status(slot, &info->adapter_status);
result = pci_hp_change_slot_info(slot->hotplug_slot, info);
kfree (info);
@@ -404,7 +392,7 @@ static void handle_button_press_event(struct slot *p_slot)
switch (p_slot->state) {
case STATIC_STATE:
- p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ pciehp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl,
@@ -418,9 +406,9 @@ static void handle_button_press_event(struct slot *p_slot)
}
/* blink green LED and turn off amber */
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_blink(p_slot);
+ pciehp_green_led_blink(p_slot);
if (ATTN_LED(ctrl))
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(p_slot, 0);
schedule_delayed_work(&p_slot->work, 5*HZ);
break;
@@ -435,13 +423,13 @@ static void handle_button_press_event(struct slot *p_slot)
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE) {
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_on(p_slot);
+ pciehp_green_led_on(p_slot);
} else {
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
}
if (ATTN_LED(ctrl))
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled "
"due to button press\n", slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -479,7 +467,7 @@ static void handle_surprise_event(struct slot *p_slot)
info->p_slot = p_slot;
INIT_WORK(&info->work, pciehp_power_thread);
- p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ pciehp_get_adapter_status(p_slot, &getstatus);
if (!getstatus)
p_slot->state = POWEROFF_STATE;
else
@@ -503,9 +491,9 @@ static void interrupt_event_handler(struct work_struct *work)
if (!POWER_CTRL(ctrl))
break;
if (ATTN_LED(ctrl))
- p_slot->hpc_ops->set_attention_status(p_slot, 1);
+ pciehp_set_attention_status(p_slot, 1);
if (PWR_LED(ctrl))
- p_slot->hpc_ops->green_led_off(p_slot);
+ pciehp_green_led_off(p_slot);
break;
case INT_PRESENCE_ON:
case INT_PRESENCE_OFF:
@@ -530,45 +518,38 @@ int pciehp_enable_slot(struct slot *p_slot)
int rc;
struct controller *ctrl = p_slot->ctrl;
- /* Check to see if (latch closed, card present, power off) */
- mutex_lock(&p_slot->ctrl->crit_sect);
-
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = pciehp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
if (MRL_SENS(p_slot->ctrl)) {
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = pciehp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
}
if (POWER_CTRL(p_slot->ctrl)) {
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = pciehp_get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
}
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ pciehp_get_latch_status(p_slot, &getstatus);
rc = board_added(p_slot);
if (rc) {
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ pciehp_get_latch_status(p_slot, &getstatus);
}
update_slot_info(p_slot);
- mutex_unlock(&p_slot->ctrl->crit_sect);
return rc;
}
@@ -582,35 +563,29 @@ int pciehp_disable_slot(struct slot *p_slot)
if (!p_slot->ctrl)
return 1;
- /* Check to see if (latch closed, card present, power on) */
- mutex_lock(&p_slot->ctrl->crit_sect);
-
if (!HP_SUPR_RM(p_slot->ctrl)) {
- ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ ret = pciehp_get_adapter_status(p_slot, &getstatus);
if (ret || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
}
if (MRL_SENS(p_slot->ctrl)) {
- ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ ret = pciehp_get_latch_status(p_slot, &getstatus);
if (ret || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -ENODEV;
}
}
if (POWER_CTRL(p_slot->ctrl)) {
- ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ ret = pciehp_get_power_status(p_slot, &getstatus);
if (ret || !getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
- mutex_unlock(&p_slot->ctrl->crit_sect);
return -EINVAL;
}
}
@@ -618,7 +593,6 @@ int pciehp_disable_slot(struct slot *p_slot)
ret = remove_board(p_slot);
update_slot_info(p_slot);
- mutex_unlock(&p_slot->ctrl->crit_sect);
return ret;
}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 52813257e5bf..9ef4605c1ef6 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -44,25 +44,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_read_config_word(dev, ctrl->cap_base + reg, value);
}
static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_read_config_dword(dev, ctrl->cap_base + reg, value);
}
static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_write_config_word(dev, ctrl->cap_base + reg, value);
}
static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
return pci_write_config_dword(dev, ctrl->cap_base + reg, value);
}
@@ -266,7 +266,7 @@ static void pcie_wait_link_active(struct controller *ctrl)
ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n");
}
-static int hpc_check_lnk_status(struct controller *ctrl)
+int pciehp_check_link_status(struct controller *ctrl)
{
u16 lnk_status;
int retval = 0;
@@ -305,7 +305,7 @@ static int hpc_check_lnk_status(struct controller *ctrl)
return retval;
}
-static int hpc_get_attention_status(struct slot *slot, u8 *status)
+int pciehp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_ctrl;
@@ -344,7 +344,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot *slot, u8 *status)
+int pciehp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_ctrl;
@@ -376,7 +376,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
return retval;
}
-static int hpc_get_latch_status(struct slot *slot, u8 *status)
+int pciehp_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
@@ -392,7 +392,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+int pciehp_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
@@ -408,7 +408,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_query_power_fault(struct slot *slot)
+int pciehp_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_status;
@@ -422,7 +422,7 @@ static int hpc_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_set_attention_status(struct slot *slot, u8 value)
+int pciehp_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -450,7 +450,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
return rc;
}
-static void hpc_set_green_led_on(struct slot *slot)
+void pciehp_green_led_on(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -463,7 +463,7 @@ static void hpc_set_green_led_on(struct slot *slot)
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
-static void hpc_set_green_led_off(struct slot *slot)
+void pciehp_green_led_off(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -476,7 +476,7 @@ static void hpc_set_green_led_off(struct slot *slot)
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
-static void hpc_set_green_led_blink(struct slot *slot)
+void pciehp_green_led_blink(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -489,7 +489,7 @@ static void hpc_set_green_led_blink(struct slot *slot)
__func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd);
}
-static int hpc_power_on_slot(struct slot * slot)
+int pciehp_power_on_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -497,8 +497,6 @@ static int hpc_power_on_slot(struct slot * slot)
u16 slot_status;
int retval = 0;
- ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
-
/* Clear sticky power-fault bit from previous power failures */
retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
if (retval) {
@@ -539,7 +537,7 @@ static int hpc_power_on_slot(struct slot * slot)
static inline int pcie_mask_bad_dllp(struct controller *ctrl)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
int pos;
u32 reg;
@@ -556,7 +554,7 @@ static inline int pcie_mask_bad_dllp(struct controller *ctrl)
static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
{
- struct pci_dev *dev = ctrl->pci_dev;
+ struct pci_dev *dev = ctrl->pcie->port;
u32 reg;
int pos;
@@ -570,7 +568,7 @@ static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
}
-static int hpc_power_off_slot(struct slot * slot)
+int pciehp_power_off_slot(struct slot * slot)
{
struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
@@ -578,8 +576,6 @@ static int hpc_power_off_slot(struct slot * slot)
int retval = 0;
int changed;
- ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot);
-
/*
* Set Bad DLLP Mask bit in Correctable Error Mask
* Register. This is the workaround against Bad DLLP error
@@ -614,8 +610,8 @@ static int hpc_power_off_slot(struct slot * slot)
static irqreturn_t pcie_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
+ struct slot *slot = ctrl->slot;
u16 detected, intr_loc;
- struct slot *p_slot;
/*
* In order to guarantee that all interrupt events are
@@ -656,29 +652,27 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
return IRQ_HANDLED;
- p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
-
/* Check MRL Sensor Changed */
if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
- pciehp_handle_switch_change(p_slot);
+ pciehp_handle_switch_change(slot);
/* Check Attention Button Pressed */
if (intr_loc & PCI_EXP_SLTSTA_ABP)
- pciehp_handle_attention_button(p_slot);
+ pciehp_handle_attention_button(slot);
/* Check Presence Detect Changed */
if (intr_loc & PCI_EXP_SLTSTA_PDC)
- pciehp_handle_presence_change(p_slot);
+ pciehp_handle_presence_change(slot);
/* Check Power Fault Detected */
if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- pciehp_handle_power_fault(p_slot);
+ pciehp_handle_power_fault(slot);
}
return IRQ_HANDLED;
}
-static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
+int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value)
{
struct controller *ctrl = slot->ctrl;
enum pcie_link_speed lnk_speed;
@@ -693,7 +687,10 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
switch (lnk_cap & 0x000F) {
case 1:
- lnk_speed = PCIE_2PT5GB;
+ lnk_speed = PCIE_2_5GB;
+ break;
+ case 2:
+ lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
@@ -706,7 +703,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_get_max_lnk_width(struct slot *slot,
+int pciehp_get_max_lnk_width(struct slot *slot,
enum pcie_link_width *value)
{
struct controller *ctrl = slot->ctrl;
@@ -756,7 +753,7 @@ static int hpc_get_max_lnk_width(struct slot *slot,
return retval;
}
-static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
+int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value)
{
struct controller *ctrl = slot->ctrl;
enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
@@ -772,7 +769,10 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
case 1:
- lnk_speed = PCIE_2PT5GB;
+ lnk_speed = PCIE_2_5GB;
+ break;
+ case 2:
+ lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
@@ -785,7 +785,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_get_cur_lnk_width(struct slot *slot,
+int pciehp_get_cur_lnk_width(struct slot *slot,
enum pcie_link_width *value)
{
struct controller *ctrl = slot->ctrl;
@@ -836,30 +836,6 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
return retval;
}
-static void pcie_release_ctrl(struct controller *ctrl);
-static struct hpc_ops pciehp_hpc_ops = {
- .power_on_slot = hpc_power_on_slot,
- .power_off_slot = hpc_power_off_slot,
- .set_attention_status = hpc_set_attention_status,
- .get_power_status = hpc_get_power_status,
- .get_attention_status = hpc_get_attention_status,
- .get_latch_status = hpc_get_latch_status,
- .get_adapter_status = hpc_get_adapter_status,
-
- .get_max_bus_speed = hpc_get_max_lnk_speed,
- .get_cur_bus_speed = hpc_get_cur_lnk_speed,
- .get_max_lnk_width = hpc_get_max_lnk_width,
- .get_cur_lnk_width = hpc_get_cur_lnk_width,
-
- .query_power_fault = hpc_query_power_fault,
- .green_led_on = hpc_set_green_led_on,
- .green_led_off = hpc_set_green_led_off,
- .green_led_blink = hpc_set_green_led_blink,
-
- .release_ctlr = pcie_release_ctrl,
- .check_lnk_status = hpc_check_lnk_status,
-};
-
int pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -924,23 +900,16 @@ static int pcie_init_slot(struct controller *ctrl)
if (!slot)
return -ENOMEM;
- slot->hp_slot = 0;
slot->ctrl = ctrl;
- slot->bus = ctrl->pci_dev->subordinate->number;
- slot->device = ctrl->slot_device_offset + slot->hp_slot;
- slot->hpc_ops = ctrl->hpc_ops;
- slot->number = ctrl->first_slot;
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
- list_add(&slot->slot_list, &ctrl->slot_list);
+ ctrl->slot = slot;
return 0;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
- struct slot *slot;
- slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
- list_del(&slot->slot_list);
+ struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
flush_scheduled_work();
flush_workqueue(pciehp_wq);
@@ -951,7 +920,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
{
int i;
u16 reg16;
- struct pci_dev *pdev = ctrl->pci_dev;
+ struct pci_dev *pdev = ctrl->pcie->port;
if (!pciehp_debug)
return;
@@ -974,7 +943,7 @@ static inline void dbg_ctrl(struct controller *ctrl)
(unsigned long long)pci_resource_start(pdev, i));
}
ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
- ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot);
+ ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
ctrl_info(ctrl, " Attention Button : %3s\n",
ATTN_BUTTN(ctrl) ? "yes" : "no");
ctrl_info(ctrl, " Power Controller : %3s\n",
@@ -1008,10 +977,7 @@ struct controller *pcie_init(struct pcie_device *dev)
dev_err(&dev->device, "%s: Out of memory\n", __func__);
goto abort;
}
- INIT_LIST_HEAD(&ctrl->slot_list);
-
ctrl->pcie = dev;
- ctrl->pci_dev = pdev;
ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!ctrl->cap_base) {
ctrl_err(ctrl, "Cannot find PCI Express capability\n");
@@ -1023,11 +989,6 @@ struct controller *pcie_init(struct pcie_device *dev)
}
ctrl->slot_cap = slot_cap;
- ctrl->first_slot = slot_cap >> 19;
- ctrl->slot_device_offset = 0;
- ctrl->num_slots = 1;
- ctrl->hpc_ops = &pciehp_hpc_ops;
- mutex_init(&ctrl->crit_sect);
mutex_init(&ctrl->ctrl_lock);
init_waitqueue_head(&ctrl->queue);
dbg_ctrl(ctrl);
@@ -1083,7 +1044,7 @@ abort:
return NULL;
}
-void pcie_release_ctrl(struct controller *ctrl)
+void pciehp_release_ctrl(struct controller *ctrl)
{
pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
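
For illustration, a minimal sketch of a hypothetical caller using the helpers exported above in place of the removed hpc_ops table; it assumes the pciehp.h declarations introduced by this patch, and example_dump_slot_state itself is not part of the patch:

static void example_dump_slot_state(struct controller *ctrl)
{
	struct slot *slot = ctrl->slot;		/* one slot per PCIe port now */
	u8 power = 0, presence = 0;

	if (!pciehp_get_power_status(slot, &power))
		ctrl_info(ctrl, "slot power is %s\n", power ? "on" : "off");
	if (!pciehp_get_adapter_status(slot, &presence))
		ctrl_info(ctrl, "adapter is %s\n",
			  presence ? "present" : "absent");
	if (presence)
		pciehp_green_led_on(slot);	/* was slot->hpc_ops->green_led_on() */
}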
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 10f9566cceeb..21733108adde 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,136 +34,6 @@
#include "../pci.h"
#include "pciehp.h"
-static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
-{
- u16 pci_cmd, pci_bctl;
-
- if (hpp->revision > 1) {
- warn("Rev.%d type0 record not supported\n", hpp->revision);
- return;
- }
-
- pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (hpp->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (hpp->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- hpp->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (hpp->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (hpp->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- }
-}
-
-static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
-{
- int pos;
- u16 reg16;
- u32 reg32;
-
- if (hpp->revision > 1) {
- warn("Rev.%d type2 record not supported\n", hpp->revision);
- return;
- }
-
- /* Find PCI Express capability */
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (!pos)
- return;
-
- /* Initialize Device Control Register */
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
- reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
-
- /* Initialize Link Control Register */
- if (dev->subordinate) {
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
- reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
- | hpp->pci_exp_lnkctl_or;
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
- }
-
- /* Find Advanced Error Reporting Enhanced Capability */
- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
- if (!pos)
- return;
-
- /* Initialize Uncorrectable Error Mask Register */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
- reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
-
- /* Initialize Uncorrectable Error Severity Register */
- pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
- reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
- pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
-
- /* Initialize Correctable Error Mask Register */
- pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
- reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
- pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
-
- /* Initialize Advanced Error Capabilities and Control Register */
- pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
- reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
- pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
-
- /*
- * FIXME: The following two registers are not supported yet.
- *
- * o Secondary Uncorrectable Error Severity Register
- * o Secondary Uncorrectable Error Mask Register
- */
-}
-
-static void program_fw_provided_values(struct pci_dev *dev)
-{
- struct pci_dev *cdev;
- struct hotplug_params hpp;
-
- /* Program hpp values for this device */
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- if (pciehp_get_hp_params_from_firmware(dev, &hpp)) {
- warn("Could not get hotplug parameters\n");
- return;
- }
-
- if (hpp.t2)
- program_hpp_type2(dev, hpp.t2);
- if (hpp.t0)
- program_hpp_type0(dev, hpp.t0);
-
- /* Program child devices */
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- program_fw_provided_values(cdev);
- }
-}
-
static int __ref pciehp_add_bridge(struct pci_dev *dev)
{
struct pci_bus *parent = dev->bus;
@@ -193,27 +63,27 @@ static int __ref pciehp_add_bridge(struct pci_dev *dev)
int pciehp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
- struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
int num, fn;
struct controller *ctrl = p_slot->ctrl;
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
if (dev) {
ctrl_err(ctrl, "Device %s already exists "
- "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev),
- pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ "at %04x:%02x:00, cannot hot-add\n", pci_name(dev),
+ pci_domain_nr(parent), parent->number);
pci_dev_put(dev);
return -EINVAL;
}
- num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
if (num == 0) {
ctrl_err(ctrl, "No new device found\n");
return -ENODEV;
}
for (fn = 0; fn < 8; fn++) {
- dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
+ dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
if (!dev)
continue;
if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
@@ -226,7 +96,7 @@ int pciehp_configure_device(struct slot *p_slot)
(dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
pciehp_add_bridge(dev);
}
- program_fw_provided_values(dev);
+ pci_configure_slot(dev);
pci_dev_put(dev);
}
@@ -241,19 +111,18 @@ int pciehp_unconfigure_device(struct slot *p_slot)
int j;
u8 bctl = 0;
u8 presence = 0;
- struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
u16 command;
struct controller *ctrl = p_slot->ctrl;
- ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
- __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
- ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence);
+ ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
+ __func__, pci_domain_nr(parent), parent->number);
+ ret = pciehp_get_adapter_status(p_slot, &presence);
if (ret)
presence = 0;
for (j = 0; j < 8; j++) {
- struct pci_dev* temp = pci_get_slot(parent,
- (p_slot->device << 3) | j);
+ struct pci_dev* temp = pci_get_slot(parent, PCI_DEVFN(0, j));
if (!temp)
continue;
if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
@@ -285,11 +154,6 @@ int pciehp_unconfigure_device(struct slot *p_slot)
}
pci_dev_put(temp);
}
- /*
- * Some PCI Express root ports require fixup after hot-plug operation.
- */
- if (pcie_mch_quirk)
- pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev);
return rc;
}
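
The slot's device number is fixed at 0 in the hunks above because a PCI Express port leads to exactly one device; only the function number varies. As a quick sketch of the devfn encoding used by those calls (standard <linux/pci.h> macros, values shown purely for illustration):

	unsigned int devfn = PCI_DEVFN(0, 3);	/* device 0, function 3 -> devfn 0x03 */
	unsigned int slot  = PCI_SLOT(devfn);	/* upper 5 bits: 0 */
	unsigned int func  = PCI_FUNC(devfn);	/* lower 3 bits: 3 */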
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
new file mode 100644
index 000000000000..cc8ec3aa41a7
--- /dev/null
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+
+static struct hpp_type0 pci_default_type0 = {
+ .revision = 1,
+ .cache_line_size = 8,
+ .latency_timer = 0x40,
+ .enable_serr = 0,
+ .enable_perr = 0,
+};
+
+static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
+{
+ u16 pci_cmd, pci_bctl;
+
+ if (!hpp) {
+ /*
+ * Perhaps we *should* use default settings for PCIe, but
+ * pciehp didn't, so we won't either.
+ */
+ if (dev->is_pcie)
+ return;
+ dev_info(&dev->dev, "using default PCI settings\n");
+ hpp = &pci_default_type0;
+ }
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev,
+ "PCI settings rev %d not supported; using defaults\n",
+ hpp->revision);
+ hpp = &pci_default_type0;
+ }
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
+ pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
+ if (hpp->enable_serr)
+ pci_cmd |= PCI_COMMAND_SERR;
+ else
+ pci_cmd &= ~PCI_COMMAND_SERR;
+ if (hpp->enable_perr)
+ pci_cmd |= PCI_COMMAND_PARITY;
+ else
+ pci_cmd &= ~PCI_COMMAND_PARITY;
+ pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+
+ /* Program bridge control value */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
+ hpp->latency_timer);
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
+ if (hpp->enable_serr)
+ pci_bctl |= PCI_BRIDGE_CTL_SERR;
+ else
+ pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
+ if (hpp->enable_perr)
+ pci_bctl |= PCI_BRIDGE_CTL_PARITY;
+ else
+ pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
+ }
+}
+
+static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+{
+ if (hpp)
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
+}
+
+static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+{
+ int pos;
+ u16 reg16;
+ u32 reg32;
+
+ if (!hpp)
+ return;
+
+ /* Find PCI Express capability */
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
+ hpp->revision);
+ return;
+ }
+
+ /* Initialize Device Control Register */
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
+ reg16 = (reg16 & hpp->pci_exp_devctl_and) | hpp->pci_exp_devctl_or;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);
+
+ /* Initialize Link Control Register */
+ if (dev->subordinate) {
+ pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &reg16);
+ reg16 = (reg16 & hpp->pci_exp_lnkctl_and)
+ | hpp->pci_exp_lnkctl_or;
+ pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Find Advanced Error Reporting Enhanced Capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return;
+
+ /* Initialize Uncorrectable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
+ reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
+
+ /* Initialize Uncorrectable Error Severity Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
+ reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
+
+ /* Initialize Correctable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
+ reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
+
+ /* Initialize Advanced Error Capabilities and Control Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ /*
+ * FIXME: The following two registers are not supported yet.
+ *
+ * o Secondary Uncorrectable Error Severity Register
+ * o Secondary Uncorrectable Error Mask Register
+ */
+}
+
+void pci_configure_slot(struct pci_dev *dev)
+{
+ struct pci_dev *cdev;
+ struct hotplug_params hpp;
+ int ret;
+
+ if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
+ (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
+ return;
+
+ memset(&hpp, 0, sizeof(hpp));
+ ret = pci_get_hp_params(dev, &hpp);
+ if (ret)
+ dev_warn(&dev->dev, "no hotplug settings from platform\n");
+
+ program_hpp_type2(dev, hpp.t2);
+ program_hpp_type1(dev, hpp.t1);
+ program_hpp_type0(dev, hpp.t0);
+
+ if (dev->subordinate) {
+ list_for_each_entry(cdev, &dev->subordinate->devices,
+ bus_list)
+ pci_configure_slot(cdev);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_configure_slot);
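
A minimal usage sketch of the new helper, assuming a hotplug driver that has just scanned its bridge's subordinate bus; the example_* wrapper and the fixed device number are illustrative, not part of this file:

static void example_configure_new_functions(struct pci_bus *parent)
{
	struct pci_dev *dev;
	int fn;

	for (fn = 0; fn < 8; fn++) {
		dev = pci_get_slot(parent, PCI_DEVFN(0, fn));
		if (!dev)
			continue;
		/* Applies firmware _HPP/_HPX settings, recursing into children */
		pci_configure_slot(dev);
		pci_dev_put(dev);
	}
}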
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 974e924ca96d..bd588eb8e922 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -188,21 +188,12 @@ static inline const char *slot_name(struct slot *slot)
#ifdef CONFIG_ACPI
#include <linux/pci-acpi.h>
-static inline int get_hp_params_from_firmware(struct pci_dev *dev,
- struct hotplug_params *hpp)
-{
- if (ACPI_FAILURE(acpi_get_hp_params_from_firmware(dev->bus, hpp)))
- return -ENODEV;
- return 0;
-}
-
static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
#else
-#define get_hp_params_from_firmware(dev, hpp) (-ENODEV)
#define get_hp_hw_control_from_firmware(dev) (0)
#endif
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index aa315e52529b..8c3d3219f227 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,66 +34,6 @@
#include "../pci.h"
#include "shpchp.h"
-static void program_fw_provided_values(struct pci_dev *dev)
-{
- u16 pci_cmd, pci_bctl;
- struct pci_dev *cdev;
- struct hotplug_params hpp;
-
- /* Program hpp values for this device */
- if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
- return;
-
- /* use default values if we can't get them from firmware */
- if (get_hp_params_from_firmware(dev, &hpp) ||
- !hpp.t0 || (hpp.t0->revision > 1)) {
- warn("Could not get hotplug parameters. Use defaults\n");
- hpp.t0 = &hpp.type0_data;
- hpp.t0->revision = 0;
- hpp.t0->cache_line_size = 8;
- hpp.t0->latency_timer = 0x40;
- hpp.t0->enable_serr = 0;
- hpp.t0->enable_perr = 0;
- }
-
- pci_write_config_byte(dev,
- PCI_CACHE_LINE_SIZE, hpp.t0->cache_line_size);
- pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
- if (hpp.t0->enable_serr)
- pci_cmd |= PCI_COMMAND_SERR;
- else
- pci_cmd &= ~PCI_COMMAND_SERR;
- if (hpp.t0->enable_perr)
- pci_cmd |= PCI_COMMAND_PARITY;
- else
- pci_cmd &= ~PCI_COMMAND_PARITY;
- pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
- /* Program bridge control value and child devices */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
- hpp.t0->latency_timer);
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
- if (hpp.t0->enable_serr)
- pci_bctl |= PCI_BRIDGE_CTL_SERR;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
- if (hpp.t0->enable_perr)
- pci_bctl |= PCI_BRIDGE_CTL_PARITY;
- else
- pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
- if (dev->subordinate) {
- list_for_each_entry(cdev, &dev->subordinate->devices,
- bus_list)
- program_fw_provided_values(cdev);
- }
- }
-}
-
int __ref shpchp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
@@ -153,7 +93,7 @@ int __ref shpchp_configure_device(struct slot *p_slot)
child->subordinate = pci_do_scan_bus(child);
pci_bus_size_bridges(child);
}
- program_fw_provided_values(dev);
+ pci_configure_slot(dev);
pci_dev_put(dev);
}
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 2314ad7ee5fe..b1e97e682500 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -37,6 +37,8 @@
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/sysdev.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include "pci.h"
@@ -46,6 +48,7 @@
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
@@ -55,8 +58,14 @@
#define MAX_AGAW_WIDTH 64
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw) ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+ to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
+ __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
@@ -86,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p)
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
+static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
@@ -251,7 +261,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
* 2. It maps to each iommu if successful.
 * 3. Each iommu maps to this domain if successful.
*/
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
/* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -727,7 +738,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+ pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (cmpxchg64(&pte->val, 0ULL, pteval)) {
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -777,9 +788,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
- while (start_pfn <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, 2);
@@ -793,7 +805,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+
+ } while (start_pfn && start_pfn <= last_pfn);
}
/* free page table pages. last level pte should already be cleared */
@@ -809,6 +822,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+ BUG_ON(start_pfn > last_pfn);
/* We don't need lock here; nobody else touches the iova range */
level = 2;
@@ -819,7 +833,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
if (tmp + level_size(level) - 1 > last_pfn)
return;
- while (tmp + level_size(level) - 1 <= last_pfn) {
+ do {
first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
@@ -838,7 +852,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
- }
+ } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
level++;
}
/* free pgd */
@@ -1157,6 +1171,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
pr_debug("Number of Domains supportd <%ld>\n", ndomains);
nlongs = BITS_TO_LONGS(ndomains);
+ spin_lock_init(&iommu->lock);
+
/* TBD: there might be 64K domains,
* consider other allocation for future chip
*/
@@ -1169,12 +1185,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
GFP_KERNEL);
if (!iommu->domains) {
printk(KERN_ERR "Allocating domain array failed\n");
- kfree(iommu->domain_ids);
return -ENOMEM;
}
- spin_lock_init(&iommu->lock);
-
/*
* if Caching mode is set, then invalid translations are tagged
* with domainid 0. Hence we need to pre-allocate it.
@@ -1194,22 +1207,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
int i;
unsigned long flags;
- i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
- for (; i < cap_ndoms(iommu->cap); ) {
- domain = iommu->domains[i];
- clear_bit(i, iommu->domain_ids);
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
+ for (; i < cap_ndoms(iommu->cap); ) {
+ domain = iommu->domains[i];
+ clear_bit(i, iommu->domain_ids);
+
+ spin_lock_irqsave(&domain->iommu_lock, flags);
+ if (--domain->iommu_count == 0) {
+ if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+ vm_domain_exit(domain);
+ else
+ domain_exit(domain);
+ }
+ spin_unlock_irqrestore(&domain->iommu_lock, flags);
- spin_lock_irqsave(&domain->iommu_lock, flags);
- if (--domain->iommu_count == 0) {
- if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
- vm_domain_exit(domain);
- else
- domain_exit(domain);
+ i = find_next_bit(iommu->domain_ids,
+ cap_ndoms(iommu->cap), i+1);
}
- spin_unlock_irqrestore(&domain->iommu_lock, flags);
-
- i = find_next_bit(iommu->domain_ids,
- cap_ndoms(iommu->cap), i+1);
}
if (iommu->gcmd & DMA_GCMD_TE)
@@ -1309,7 +1324,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
}
static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
static struct lock_class_key reserved_rbtree_key;
static void dmar_init_reserved_ranges(void)
@@ -1320,8 +1334,6 @@ static void dmar_init_reserved_ranges(void)
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
- lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
- &reserved_alloc_key);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@@ -1924,6 +1936,9 @@ error:
}
static int iommu_identity_mapping;
+#define IDENTMAP_ALL 1
+#define IDENTMAP_GFX 2
+#define IDENTMAP_AZALIA 4
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
@@ -1958,14 +1973,35 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
struct dmar_domain *domain;
int ret;
- printk(KERN_INFO
- "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
- pci_name(pdev), start, end);
-
domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain)
return -ENOMEM;
+ /* For _hardware_ passthrough, don't bother. But for software
+ passthrough, we do it anyway -- it may indicate a memory
+ range which is reserved in E820, so which didn't get set
+ up to start with in si_domain */
+ if (domain == si_domain && hw_pass_through) {
+ printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+ return 0;
+ }
+
+ printk(KERN_INFO
+ "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
+ pci_name(pdev), start, end);
+
+ if (end >> agaw_to_width(domain->agaw)) {
+ WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ agaw_to_width(domain->agaw),
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ ret = -EIO;
+ goto error;
+ }
+
ret = iommu_domain_identity_map(domain, start, end);
if (ret)
goto error;
@@ -2016,23 +2052,6 @@ static inline void iommu_prepare_isa(void)
}
#endif /* !CONFIG_DMAR_FLPY_WA */
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
- struct pci_dev *pdev = NULL;
- struct dmar_domain *domain;
- int ret;
-
- for_each_pci_dev(pdev) {
- domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- ret = domain_context_mapping(domain, pdev,
- CONTEXT_TT_PASS_THROUGH);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2047,7 +2066,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
}
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
@@ -2074,6 +2093,9 @@ static int si_domain_init(void)
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+ if (hw)
+ return 0;
+
for_each_online_node(nid) {
work_with_active_regions(nid, si_domain_work_fn, &ret);
if (ret)
@@ -2100,15 +2122,23 @@ static int identity_mapping(struct pci_dev *pdev)
}
static int domain_add_dev_info(struct dmar_domain *domain,
- struct pci_dev *pdev)
+ struct pci_dev *pdev,
+ int translation)
{
struct device_domain_info *info;
unsigned long flags;
+ int ret;
info = alloc_devinfo_mem();
if (!info)
return -ENOMEM;
+ ret = domain_context_mapping(domain, pdev, translation);
+ if (ret) {
+ free_devinfo_mem(info);
+ return ret;
+ }
+
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
@@ -2126,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain,
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
- if (iommu_identity_mapping == 2)
- return IS_GFX_DEVICE(pdev);
+ if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
+ return 1;
+
+ if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
+ return 1;
+
+ if (!(iommu_identity_mapping & IDENTMAP_ALL))
+ return 0;
/*
* We want to start off with all devices in the 1:1 domain, and
@@ -2165,27 +2201,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
return 1;
}
-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
int ret;
- ret = si_domain_init();
+ ret = si_domain_init(hw);
if (ret)
return -EFAULT;
for_each_pci_dev(pdev) {
if (iommu_should_identity_map(pdev, 1)) {
- printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
- pci_name(pdev));
+ printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+ hw ? "hardware" : "software", pci_name(pdev));
- ret = domain_context_mapping(si_domain, pdev,
+ ret = domain_add_dev_info(si_domain, pdev,
+ hw ? CONTEXT_TT_PASS_THROUGH :
CONTEXT_TT_MULTI_LEVEL);
if (ret)
return ret;
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return ret;
}
}
@@ -2199,14 +2233,6 @@ int __init init_dmars(void)
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
- int pass_through = 1;
-
- /*
- * In case pass through can not be enabled, iommu tries to use identity
- * mapping.
- */
- if (iommu_pass_through)
- iommu_identity_mapping = 1;
/*
* for each drhd
@@ -2234,7 +2260,6 @@ int __init init_dmars(void)
deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) {
- kfree(g_iommus);
ret = -ENOMEM;
goto error;
}
@@ -2261,14 +2286,8 @@ int __init init_dmars(void)
goto error;
}
if (!ecap_pass_through(iommu->ecap))
- pass_through = 0;
+ hw_pass_through = 0;
}
- if (iommu_pass_through)
- if (!pass_through) {
- printk(KERN_INFO
- "Pass Through is not supported by hardware.\n");
- iommu_pass_through = 0;
- }
/*
* Start from the sane iommu hardware state.
@@ -2323,64 +2342,60 @@ int __init init_dmars(void)
}
}
+ if (iommu_pass_through)
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+ iommu_identity_mapping |= IDENTMAP_GFX;
+#endif
+
+ check_tylersburg_isoch();
+
/*
- * If pass through is set and enabled, context entries of all pci
- * devices are intialized by pass through translation type.
+ * If pass through is not set or not enabled, setup context entries for
+ * identity mappings for rmrr, gfx, and isa and may fall back to static
+ * identity mapping if iommu_identity_mapping is set.
*/
- if (iommu_pass_through) {
- ret = init_context_pass_through();
+ if (iommu_identity_mapping) {
+ ret = iommu_prepare_static_identity_mapping(hw_pass_through);
if (ret) {
- printk(KERN_ERR "IOMMU: Pass through init failed.\n");
- iommu_pass_through = 0;
+ printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+ goto error;
}
}
-
/*
- * If pass through is not set or not enabled, setup context entries for
- * identity mappings for rmrr, gfx, and isa and may fall back to static
- * identity mapping if iommu_identity_mapping is set.
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
*/
- if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
- if (!iommu_identity_mapping)
- iommu_identity_mapping = 2;
-#endif
- if (iommu_identity_mapping)
- iommu_prepare_static_identity_mapping();
- /*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
- */
- printk(KERN_INFO "IOMMU: Setting RMRR:\n");
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /*
- * some BIOS lists non-exist devices in DMAR
- * table.
- */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
- }
+ printk(KERN_INFO "IOMMU: Setting RMRR:\n");
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * some BIOSes list nonexistent devices in the DMAR
+ * table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
}
-
- iommu_prepare_isa();
}
+ iommu_prepare_isa();
+
/*
* for each drhd
* enable fault log
@@ -2403,11 +2418,12 @@ int __init init_dmars(void)
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
- iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
if (ret)
goto error;
+
+ iommu_disable_protect_mem_regions(iommu);
}
return 0;
@@ -2454,8 +2470,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
return iova;
}
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
{
struct dmar_domain *domain;
int ret;
@@ -2483,6 +2498,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain;
}
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+ struct device_domain_info *info;
+
+ /* No lock here, assumes no domain exit in normal case */
+ info = dev->dev.archdata.iommu;
+ if (likely(info))
+ return info->domain;
+
+ return __get_valid_domain_for_dev(dev);
+}
+
static int iommu_dummy(struct pci_dev *pdev)
{
return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2525,10 +2552,10 @@ static int iommu_no_mapping(struct device *dev)
*/
if (iommu_should_identity_map(pdev, 0)) {
int ret;
- ret = domain_add_dev_info(si_domain, pdev);
- if (ret)
- return 0;
- ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+ ret = domain_add_dev_info(si_domain, pdev,
+ hw_pass_through ?
+ CONTEXT_TT_PASS_THROUGH :
+ CONTEXT_TT_MULTI_LEVEL);
if (!ret) {
printk(KERN_INFO "64bit %s uses identity mapping\n",
pci_name(pdev));
@@ -2637,10 +2664,9 @@ static void flush_unmaps(void)
unsigned long mask;
struct iova *iova = deferred_flush[i].iova[j];
- mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
- mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- iova->pfn_lo << PAGE_SHIFT, mask);
+ (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
deferred_flush[i].next = 0;
@@ -2733,12 +2759,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
}
}
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
- int dir)
-{
- intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
-
static void *intel_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
@@ -2771,7 +2791,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+ intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, order);
}
@@ -2807,11 +2827,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
/* free page tables */
dma_pte_free_pagetable(domain, start_pfn, last_pfn);
- iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
- (last_pfn - start_pfn + 1));
-
- /* free iova */
- __free_iova(&domain->iovad, iova);
+ if (intel_iommu_strict) {
+ iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+ last_pfn - start_pfn + 1);
+ /* free iova */
+ __free_iova(&domain->iovad, iova);
+ } else {
+ add_unmap(domain, iova);
+ /*
+ * queue up the release of the unmap to save the 1/6th of the
+ * cpu used up by the iotlb flush operation...
+ */
+ }
}
static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3055,8 +3082,8 @@ static int init_iommu_hw(void)
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
+ iommu_disable_protect_mem_regions(iommu);
}
return 0;
@@ -3183,18 +3210,28 @@ static int __init init_iommu_sysfs(void)
int __init intel_iommu_init(void)
{
int ret = 0;
+ int force_on = 0;
+
+ /* VT-d is required for a TXT/tboot launch, so enforce that */
+ force_on = tboot_force_iommu();
- if (dmar_table_init())
+ if (dmar_table_init()) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMAR table\n");
return -ENODEV;
+ }
- if (dmar_dev_scope_init())
+ if (dmar_dev_scope_init()) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMAR device scope\n");
return -ENODEV;
+ }
/*
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+ if (no_iommu || swiotlb || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
@@ -3204,6 +3241,8 @@ int __init intel_iommu_init(void)
ret = init_dmars();
if (ret) {
+ if (force_on)
+ panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
put_iova_domain(&reserved_iova_list);
iommu_exit_mempool();
@@ -3214,14 +3253,7 @@ int __init intel_iommu_init(void)
init_timer(&unmap_timer);
force_iommu = 1;
-
- if (!iommu_pass_through) {
- printk(KERN_INFO
- "Multi-level page-table translation for DMAR.\n");
- dma_ops = &intel_dma_ops;
- } else
- printk(KERN_INFO
- "DMAR: Pass through translation for DMAR.\n");
+ dma_ops = &intel_dma_ops;
init_iommu_sysfs();
@@ -3504,7 +3536,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct intel_iommu *iommu;
int addr_width;
u64 end;
- int ret;
/* normally pdev is not mapped */
if (unlikely(domain_context_mapped(pdev))) {
@@ -3536,12 +3567,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT;
}
- ret = domain_add_dev_info(dmar_domain, pdev);
- if (ret)
- return ret;
-
- ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
- return ret;
+ return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
@@ -3658,3 +3684,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
+
+/* On Tylersburg chipsets, some BIOSes have been known to enable the
+ ISOCH DMAR unit for the Azalia sound device, but not give it any
+ TLB entries, which causes it to deadlock. Check for that. We do
+ this in a function called from init_dmars(), instead of in a PCI
+ quirk, because we don't want to print the obnoxious "BIOS broken"
+ message if VT-d is actually disabled.
+*/
+static void __init check_tylersburg_isoch(void)
+{
+ struct pci_dev *pdev;
+ uint32_t vtisochctrl;
+
+ /* If there's no Azalia in the system anyway, forget it. */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
+ if (!pdev)
+ return;
+ pci_dev_put(pdev);
+
+ /* System Management Registers. Might be hidden, in which case
+ we can't do the sanity check. But that's OK, because the
+ known-broken BIOSes _don't_ actually hide it, so far. */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
+ if (!pdev)
+ return;
+
+ if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
+ pci_dev_put(pdev);
+
+ /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
+ if (vtisochctrl & 1)
+ return;
+
+ /* Drop all bits other than the number of TLB entries */
+ vtisochctrl &= 0x1c;
+
+ /* If we have the recommended number of TLB entries (16), fine. */
+ if (vtisochctrl == 0x10)
+ return;
+
+ /* Zero TLB entries? You get to ride the short bus to school. */
+ if (!vtisochctrl) {
+ WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
+ "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+ dmi_get_system_info(DMI_BIOS_VENDOR),
+ dmi_get_system_info(DMI_BIOS_VERSION),
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
+ iommu_identity_mapping |= IDENTMAP_AZALIA;
+ return;
+ }
+
+ printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
+ vtisochctrl);
+}
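
The reworked DOMAIN_MAX_PFN/DOMAIN_MAX_ADDR macros above clamp the page-frame range to what fits in an unsigned long. A standalone userspace sketch of the same arithmetic, assuming VTD_PAGE_SHIFT is 12 (4 KiB VT-d pages) and a 48-bit guest address width:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12
#define __DOMAIN_MAX_PFN(gaw)	((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)

int main(void)
{
	/* gaw = 48: 2^36 - 1 page frames, covering a 256 TiB DMA address space */
	uint64_t max_pfn  = __DOMAIN_MAX_PFN(48);
	uint64_t max_addr = max_pfn << VTD_PAGE_SHIFT;

	printf("max pfn  = %#llx\n", (unsigned long long)max_pfn);	/* 0xfffffffff */
	printf("max addr = %#llx\n", (unsigned long long)max_addr);	/* 0xfffffffff000 */
	return 0;
}

On a 32-bit build, DOMAIN_MAX_PFN() additionally clamps the result to ULONG_MAX, which is the point of the min_t() in the kernel macro.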
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 44803644ca05..0ed78a764ded 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
if (disable_intremap)
return 0;
+ if (!dmar_ir_support())
+ return 0;
+
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
struct dmar_drhd_unit *drhd;
int setup = 0;
+ if (parse_ioapics_under_ir() != 1) {
+ printk(KERN_INFO "Not enable interrupt remapping\n");
+ return -1;
+ }
+
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index 46dd440e2315..7914951ef29a 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -22,7 +22,6 @@
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
- spin_lock_init(&iovad->iova_alloc_lock);
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn,
bool size_aligned)
{
- unsigned long flags;
struct iova *new_iova;
int ret;
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
if (size_aligned)
size = __roundup_pow_of_two(size);
- spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
new_iova, size_aligned);
- spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
if (ret) {
free_iova_mem(new_iova);
return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
struct iova *iova;
unsigned int overlap = 0;
- spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
- spin_lock(&iovad->iova_rbtree_lock);
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
- spin_unlock(&iovad->iova_rbtree_lock);
- spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return iova;
}
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
unsigned long flags;
struct rb_node *node;
- spin_lock_irqsave(&from->iova_alloc_lock, flags);
- spin_lock(&from->iova_rbtree_lock);
+ spin_lock_irqsave(&from->iova_rbtree_lock, flags);
for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
struct iova *iova = container_of(node, struct iova, node);
struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
iova->pfn_lo, iova->pfn_lo);
}
- spin_unlock(&from->iova_rbtree_lock);
- spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+ spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c
new file mode 100644
index 000000000000..871f65c15936
--- /dev/null
+++ b/drivers/pci/legacy.c
@@ -0,0 +1,34 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include "pci.h"
+
+/**
+ * pci_find_device - begin or continue searching for a PCI device by vendor/device id
+ * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
+ * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is found
+ * with a matching @vendor and @device, a pointer to its device structure is
+ * returned. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * Otherwise if @from is not %NULL, searches continue from next device
+ * on the global list.
+ *
+ * NOTE: Do not use this function any more; use pci_get_device() instead, as
+ * the PCI device returned by this function can disappear at any moment in
+ * time.
+ */
+struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from)
+{
+ struct pci_dev *pdev;
+
+ pci_dev_get(from);
+ pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
+ pci_dev_put(pdev);
+ return pdev;
+}
+EXPORT_SYMBOL(pci_find_device);
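
As the NOTE in the kerneldoc warns, the pointer returned here carries no reference, so the device may disappear while the caller is still using it. For comparison, a sketch of the reference-counted pattern callers should migrate to; the example_* helper and the bridge criterion are hypothetical:

static struct pci_dev *example_find_first_intel_bridge(void)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() takes a reference on the device it returns and
	 * drops the one held on 'from', so the loop is hot-unplug safe. */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev))) {
		if (pdev->subordinate)		/* example criterion: a bridge */
			return pdev;		/* caller must pci_dev_put() it */
	}
	return NULL;
}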
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d986afb7032b..f9cf3173b23d 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -16,9 +16,8 @@
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
-
-#include <asm/errno.h>
-#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/io.h>
#include "pci.h"
#include "msi.h"
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
write_msi_msg_desc(desc, msg);
}
-static int msi_free_irqs(struct pci_dev* dev);
+static void free_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry, *tmp;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int i, nvec;
+ if (!entry->irq)
+ continue;
+ nvec = 1 << entry->msi_attrib.multiple;
+ for (i = 0; i < nvec; i++)
+ BUG_ON(irq_has_action(entry->irq + i));
+ }
+
+ arch_teardown_msi_irqs(dev);
+
+ list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ if (entry->msi_attrib.is_msix) {
+ if (list_is_last(&entry->list, &dev->msi_list))
+ iounmap(entry->mask_base);
+ }
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
pos = entry->msi_attrib.pos;
pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
u16 control;
unsigned mask;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
if (!entry)
return -ENOMEM;
- entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = is_64bit_address(control);
- entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.is_msix = 0;
+ entry->msi_attrib.is_64 = is_64bit_address(control);
+ entry->msi_attrib.entry_nr = 0;
+ entry->msi_attrib.maskbit = is_mask_bit_support(control);
+ entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
+ entry->msi_attrib.pos = pos;
entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
return ret;
}
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return 0;
}
-/**
- * msix_capability_init - configure device's MSI-X capability
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of struct msix_entry entries
- * @nvec: number of @entries
- *
- * Setup the MSI-X capability structure of device function with a
- * single MSI-X irq. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated irqs or non-zero for otherwise.
- **/
-static int msix_capability_init(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
+ unsigned nr_entries)
{
- struct msi_desc *entry;
- int pos, i, j, nr_entries, ret;
unsigned long phys_addr;
u32 table_offset;
- u16 control;
u8 bir;
- void __iomem *base;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
-
- /* Ensure MSI-X is disabled while it is set up */
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
-
- /* Request & Map MSI-X table region */
- nr_entries = multi_msix_capable(control);
-
- pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
+ pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
- phys_addr = pci_resource_start (dev, bir) + table_offset;
- base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- if (base == NULL)
- return -ENOMEM;
+ phys_addr = pci_resource_start(dev, bir) + table_offset;
+
+ return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+}
+
+static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
+ void __iomem *base, struct msix_entry *entries,
+ int nvec)
+{
+ struct msi_desc *entry;
+ int i;
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev,
if (!i)
iounmap(base);
else
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
/* Not enough memory. Don't try again */
return -ENOMEM;
}
- j = entries[i].entry;
- entry->msi_attrib.is_msix = 1;
- entry->msi_attrib.is_64 = 1;
- entry->msi_attrib.entry_nr = j;
- entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = pos;
- entry->mask_base = base;
+ entry->msi_attrib.is_msix = 1;
+ entry->msi_attrib.is_64 = 1;
+ entry->msi_attrib.entry_nr = entries[i].entry;
+ entry->msi_attrib.default_irq = dev->irq;
+ entry->msi_attrib.pos = pos;
+ entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
}
- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
- if (ret < 0) {
- /* If we had some success report the number of irqs
- * we succeeded in setting up. */
- int avail = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
- if (entry->irq != 0) {
- avail++;
- }
- }
+ return 0;
+}
- if (avail != 0)
- ret = avail;
+static void msix_program_entries(struct pci_dev *dev,
+ struct msix_entry *entries)
+{
+ struct msi_desc *entry;
+ int i = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ entries[i].vector = entry->irq;
+ set_irq_msi(entry->irq, entry);
+ entry->masked = readl(entry->mask_base + offset);
+ msix_mask_irq(entry, 1);
+ i++;
}
+}
- if (ret) {
- msi_free_irqs(dev);
+/**
+ * msix_capability_init - configure device's MSI-X capability
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of struct msix_entry entries
+ * @nvec: number of @entries
+ *
+ * Set up the MSI-X capability structure of the device function with a
+ * single MSI-X irq. A return of zero indicates successful setup of the
+ * requested MSI-X entries with allocated irqs, or non-zero otherwise.
+ **/
+static int msix_capability_init(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int pos, ret;
+ u16 control;
+ void __iomem *base;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
+ /* Request & Map MSI-X table region */
+ base = msix_map_region(dev, pos, multi_msix_capable(control));
+ if (!base)
+ return -ENOMEM;
+
+ ret = msix_setup_entries(dev, pos, base, entries, nvec);
+ if (ret)
return ret;
- }
+
+ ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+ goto error;
/*
* Some devices require MSI-X to be enabled before we can touch the
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev,
control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
- entries[i].vector = entry->irq;
- set_irq_msi(entry->irq, entry);
- j = entries[i].entry;
- entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- msix_mask_irq(entry, 1);
- i++;
- }
+ msix_program_entries(dev, entries);
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev,
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
+
+error:
+ if (ret < 0) {
+ /*
+ * If we had some success, report the number of irqs
+ * we succeeded in setting up.
+ */
+ struct msi_desc *entry;
+ int avail = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (entry->irq != 0)
+ avail++;
+ }
+ if (avail != 0)
+ ret = avail;
+ }
+
+ free_msi_irqs(dev);
+
+ return ret;
}
/**
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev,
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 0, else return an error code.
**/
-static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
+static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
struct pci_bus *bus;
int ret;
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
if (nvec < 1)
return -ERANGE;
- /* Any bridge which does NOT route MSI transactions from it's
- * secondary bus to it's primary bus must set NO_MSI flag on
+ /*
+ * Any bridge which does NOT route MSI transactions from its
+ * secondary bus to its primary bus must set NO_MSI flag on
* the secondary pci_bus.
* We expect only arch-specific PCI host bus controller driver
* or quirks for specific PCI bridges to be setting NO_MSI.
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev)
dev->irq = desc->msi_attrib.default_irq;
}
-void pci_disable_msi(struct pci_dev* dev)
+void pci_disable_msi(struct pci_dev *dev)
{
- struct msi_desc *entry;
-
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
pci_msi_shutdown(dev);
-
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- if (entry->msi_attrib.is_msix)
- return;
-
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);
-static int msi_free_irqs(struct pci_dev* dev)
-{
- struct msi_desc *entry, *tmp;
-
- list_for_each_entry(entry, &dev->msi_list, list) {
- int i, nvec;
- if (!entry->irq)
- continue;
- nvec = 1 << entry->msi_attrib.multiple;
- for (i = 0; i < nvec; i++)
- BUG_ON(irq_has_action(entry->irq + i));
- }
-
- arch_teardown_msi_irqs(dev);
-
- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
- if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, &dev->msi_list))
- iounmap(entry->mask_base);
- }
- list_del(&entry->list);
- kfree(entry);
- }
-
- return 0;
-}
-
/**
* pci_msix_table_size - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev)
* of irqs or MSI-X vectors available. Driver should use the returned value to
* re-send its request.
**/
-int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
int status, nr_entries;
int i, j;
if (!entries)
- return -EINVAL;
+ return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
if (status)
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
- if (dev->msi_enabled) {
+ if (dev->msi_enabled) {
dev_info(&dev->dev, "can't enable MSI-X "
"(MSI IRQ already assigned)\n");
return -EINVAL;
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
}
EXPORT_SYMBOL(pci_enable_msix);
-static void msix_free_all_irqs(struct pci_dev *dev)
-{
- msi_free_irqs(dev);
-}
-
-void pci_msix_shutdown(struct pci_dev* dev)
+void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev)
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
}
-void pci_disable_msix(struct pci_dev* dev)
+
+void pci_disable_msix(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
pci_msix_shutdown(dev);
-
- msix_free_all_irqs(dev);
+ free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix);
* allocated for this device function, are reclaimed to unused state,
* which may be used later on.
**/
-void msi_remove_pci_irq_vectors(struct pci_dev* dev)
+void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev)
- return;
-
- if (dev->msi_enabled)
- msi_free_irqs(dev);
+ return;
- if (dev->msix_enabled)
- msix_free_all_irqs(dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ free_msi_irqs(dev);
}
void pci_no_msi(void)
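
The refactored MSI-X path above keeps the documented pci_enable_msix() contract: zero on success, a negative errno on failure, and a positive count when fewer vectors are available than requested. A minimal driver-side sketch of that retry convention follows; the foo_* naming and the vector count are illustrative assumptions, not part of this patch.

#include <linux/pci.h>

#define FOO_MAX_VECS	8	/* assumed example value, not from the patch */

static int foo_setup_msix(struct pci_dev *pdev, struct msix_entry *entries)
{
	int i, nvec = FOO_MAX_VECS;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	for (;;) {
		int rc = pci_enable_msix(pdev, entries, nvec);

		if (rc == 0)
			return nvec;	/* entries[i].vector now holds the irqs */
		if (rc < 0)
			return rc;	/* hard failure, fall back to MSI/INTx */
		nvec = rc;		/* only rc vectors available, retry */
	}
}
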
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ea15b0537457..33317df47699 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -109,15 +109,32 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
return handle ? acpi_bus_can_wakeup(handle) : false;
}
+static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
+{
+ while (bus->parent) {
+ struct pci_dev *bridge = bus->self;
+ int ret;
+
+ ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
+ if (!ret || bridge->is_pcie)
+ return;
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ acpi_pm_device_sleep_wake(bus->bridge, enable);
+}
+
static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
- int error = acpi_pm_device_sleep_wake(&dev->dev, enable);
+ if (acpi_pci_can_wakeup(dev))
+ return acpi_pm_device_sleep_wake(&dev->dev, enable);
- if (!error)
- dev_printk(KERN_INFO, &dev->dev,
- "wake-up capability %s by ACPI\n",
- enable ? "enabled" : "disabled");
- return error;
+ if (!dev->is_pcie)
+ acpi_pci_propagate_wakeup_enable(dev->bus, enable);
+
+ return 0;
}
static struct pci_platform_pm_ops acpi_pci_platform_pm = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a7eb7277b106..e5d47be3c6d7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,37 +19,98 @@
#include <linux/cpu.h>
#include "pci.h"
-/*
- * Dynamic device IDs are disabled for !CONFIG_HOTPLUG
- */
-
struct pci_dynid {
struct list_head node;
struct pci_device_id id;
};
-#ifdef CONFIG_HOTPLUG
+/**
+ * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
+ * @drv: target pci driver
+ * @vendor: PCI vendor ID
+ * @device: PCI device ID
+ * @subvendor: PCI subvendor ID
+ * @subdevice: PCI subdevice ID
+ * @class: PCI class
+ * @class_mask: PCI class mask
+ * @driver_data: private driver data
+ *
+ * Adds a new dynamic pci device ID to this driver and causes the
+ * driver to probe for all devices again. @drv must have been
+ * registered prior to calling this function.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int pci_add_dynid(struct pci_driver *drv,
+ unsigned int vendor, unsigned int device,
+ unsigned int subvendor, unsigned int subdevice,
+ unsigned int class, unsigned int class_mask,
+ unsigned long driver_data)
+{
+ struct pci_dynid *dynid;
+ int retval;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.vendor = vendor;
+ dynid->id.device = device;
+ dynid->id.subvendor = subvendor;
+ dynid->id.subdevice = subdevice;
+ dynid->id.class = class;
+ dynid->id.class_mask = class_mask;
+ dynid->id.driver_data = driver_data;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ get_driver(&drv->driver);
+ retval = driver_attach(&drv->driver);
+ put_driver(&drv->driver);
+
+ return retval;
+}
+
+static void pci_free_dynids(struct pci_driver *drv)
+{
+ struct pci_dynid *dynid, *n;
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/*
+ * Dynamic device ID manipulation via sysfs is disabled for !CONFIG_HOTPLUG
+ */
+#ifdef CONFIG_HOTPLUG
/**
- * store_new_id - add a new PCI device ID to this driver and re-probe devices
+ * store_new_id - sysfs frontend to pci_add_dynid()
* @driver: target device driver
* @buf: buffer for scanning device ID data
* @count: input size
*
- * Adds a new dynamic pci device ID to this driver,
- * and causes the driver to probe for all devices again.
+ * Allow PCI IDs to be added to an existing driver via sysfs.
*/
static ssize_t
store_new_id(struct device_driver *driver, const char *buf, size_t count)
{
- struct pci_dynid *dynid;
struct pci_driver *pdrv = to_pci_driver(driver);
const struct pci_device_id *ids = pdrv->id_table;
__u32 vendor, device, subvendor=PCI_ANY_ID,
subdevice=PCI_ANY_ID, class=0, class_mask=0;
unsigned long driver_data=0;
int fields=0;
- int retval=0;
+ int retval;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
&vendor, &device, &subvendor, &subdevice,
@@ -72,27 +133,8 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
return retval;
}
- dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
- if (!dynid)
- return -ENOMEM;
-
- dynid->id.vendor = vendor;
- dynid->id.device = device;
- dynid->id.subvendor = subvendor;
- dynid->id.subdevice = subdevice;
- dynid->id.class = class;
- dynid->id.class_mask = class_mask;
- dynid->id.driver_data = driver_data;
-
- spin_lock(&pdrv->dynids.lock);
- list_add_tail(&dynid->node, &pdrv->dynids.list);
- spin_unlock(&pdrv->dynids.lock);
-
- if (get_driver(&pdrv->driver)) {
- retval = driver_attach(&pdrv->driver);
- put_driver(&pdrv->driver);
- }
-
+ retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
+ class, class_mask, driver_data);
if (retval)
return retval;
return count;
@@ -145,19 +187,6 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
-static void
-pci_free_dynids(struct pci_driver *drv)
-{
- struct pci_dynid *dynid, *n;
-
- spin_lock(&drv->dynids.lock);
- list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
- list_del(&dynid->node);
- kfree(dynid);
- }
- spin_unlock(&drv->dynids.lock);
-}
-
static int
pci_create_newid_file(struct pci_driver *drv)
{
@@ -186,7 +215,6 @@ static void pci_remove_removeid_file(struct pci_driver *drv)
driver_remove_file(&drv->driver, &driver_attr_remove_id);
}
#else /* !CONFIG_HOTPLUG */
-static inline void pci_free_dynids(struct pci_driver *drv) {}
static inline int pci_create_newid_file(struct pci_driver *drv)
{
return 0;
@@ -417,8 +445,6 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
- pci_dev->state_saved = false;
-
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -514,7 +540,6 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
{
pci_restore_standard_config(pci_dev);
- pci_dev->state_saved = false;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
@@ -580,8 +605,6 @@ static int pci_pm_suspend(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_SUSPEND);
- pci_dev->state_saved = false;
-
if (!pm) {
pci_pm_default_suspend(pci_dev);
goto Fixup;
@@ -694,7 +717,7 @@ static int pci_pm_resume(struct device *dev)
pci_pm_reenable_device(pci_dev);
}
- return 0;
+ return error;
}
#else /* !CONFIG_SUSPEND */
@@ -716,8 +739,6 @@ static int pci_pm_freeze(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_FREEZE);
- pci_dev->state_saved = false;
-
if (!pm) {
pci_pm_default_suspend(pci_dev);
return 0;
@@ -793,6 +814,8 @@ static int pci_pm_thaw(struct device *dev)
pci_pm_reenable_device(pci_dev);
}
+ pci_dev->state_saved = false;
+
return error;
}
@@ -804,8 +827,6 @@ static int pci_pm_poweroff(struct device *dev)
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend(dev, PMSG_HIBERNATE);
- pci_dev->state_saved = false;
-
if (!pm) {
pci_pm_default_suspend(pci_dev);
goto Fixup;
@@ -1106,6 +1127,7 @@ static int __init pci_driver_init(void)
postcore_initcall(pci_driver_init);
+EXPORT_SYMBOL_GPL(pci_add_dynid);
EXPORT_SYMBOL(pci_match_id);
EXPORT_SYMBOL(__pci_register_driver);
EXPORT_SYMBOL(pci_unregister_driver);
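
pci_add_dynid() above becomes the single path for dynamic device IDs, used by the sysfs new_id store here and by pci-stub later in the series. A hedged sketch of how an already-registered driver could claim an extra ID at runtime; the 0x1234/0x5678 vendor/device pair is a placeholder, not from the patch.

#include <linux/pci.h>

/* drv must already have been registered with pci_register_driver() */
static int foo_claim_extra_id(struct pci_driver *drv)
{
	return pci_add_dynid(drv, 0x1234, 0x5678,	/* placeholder IDs */
			     PCI_ANY_ID, PCI_ANY_ID,	/* any subsystem */
			     0, 0,			/* any class */
			     0 /* driver_data */);
}
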
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index 74fbec0bf6cb..f7b68ca6cc98 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -19,8 +19,16 @@
#include <linux/module.h>
#include <linux/pci.h>
+static char ids[1024] __initdata;
+
+module_param_string(ids, ids, sizeof(ids), 0);
+MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
+ "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\""
+ " and multiple comma separated entries can be specified");
+
static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
+ dev_printk(KERN_INFO, &dev->dev, "claimed by stub\n");
return 0;
}
@@ -32,7 +40,42 @@ static struct pci_driver stub_driver = {
static int __init pci_stub_init(void)
{
- return pci_register_driver(&stub_driver);
+ char *p, *id;
+ int rc;
+
+ rc = pci_register_driver(&stub_driver);
+ if (rc)
+ return rc;
+
+ /* add ids specified in the module parameter */
+ p = ids;
+ while ((id = strsep(&p, ","))) {
+ unsigned int vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class=0, class_mask=0;
+ int fields;
+
+ fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+
+ if (fields < 2) {
+ printk(KERN_WARNING
+ "pci-stub: invalid id string \"%s\"\n", id);
+ continue;
+ }
+
+ printk(KERN_INFO
+ "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
+ vendor, device, subvendor, subdevice, class, class_mask);
+
+ rc = pci_add_dynid(&stub_driver, vendor, device,
+ subvendor, subdevice, class, class_mask, 0);
+ if (rc)
+ printk(KERN_WARNING
+ "pci-stub: failed to add dynamic id (%d)\n", rc);
+ }
+
+ return 0;
}
static void __exit pci_stub_exit(void)
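
Given the sscanf format above, a typical use of the new parameter would look like pci-stub.ids=8086:10f5,1af4:1000 on the kernel command line (or ids=... when loading the module); the IDs here are examples only. Each entry needs at least vendor:device, with subvendor, subdevice, class and class_mask optional.
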
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 85ebd02a64a7..0f6382f090ee 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -916,6 +916,24 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
return 0;
}
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+ ssize_t result = strict_strtoul(buf, 0, &val);
+
+ if (result < 0)
+ return result;
+
+ if (val != 1)
+ return -EINVAL;
+ return pci_reset_function(pdev);
+}
+
+static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
+
static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
int retval;
@@ -943,7 +961,22 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
/* Active State Power Management */
pcie_aspm_create_sysfs_dev_files(dev);
+ if (!pci_probe_reset_function(dev)) {
+ retval = device_create_file(&dev->dev, &reset_attr);
+ if (retval)
+ goto error;
+ dev->reset_fn = 1;
+ }
return 0;
+
+error:
+ pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->vpd && dev->vpd->attr) {
+ sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+ kfree(dev->vpd->attr);
+ }
+
+ return retval;
}
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
@@ -1037,6 +1070,10 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
}
pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->reset_fn) {
+ device_remove_file(&dev->dev, &reset_attr);
+ dev->reset_fn = 0;
+ }
}
/**
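
With reset_store() wired up above, a per-device "reset" attribute appears in the device's sysfs directory (e.g. /sys/bus/pci/devices/<BDF>/reset, where <BDF> stands for the device address) whenever pci_probe_reset_function() reports the function as resettable. Writing the single value 1 to it invokes pci_reset_function() on that device; any other value is rejected with -EINVAL.
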
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7b70312181d7..4e4c295a049f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -41,6 +41,12 @@ int pci_domains_supported = 1;
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
+#define DEFAULT_HOTPLUG_IO_SIZE (256)
+#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
+/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
+unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -507,7 +513,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
- dev->current_state = state;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ if (dev->current_state != state && printk_ratelimit())
+ dev_info(&dev->dev, "Refused to change power state, "
+ "currently in D%d\n", dev->current_state);
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
@@ -848,6 +858,7 @@ pci_restore_state(struct pci_dev *dev)
if (!dev->state_saved)
return 0;
+
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
@@ -869,6 +880,8 @@ pci_restore_state(struct pci_dev *dev)
pci_restore_msi_state(dev);
pci_restore_iov_state(dev);
+ dev->state_saved = false;
+
return 0;
}
@@ -1214,30 +1227,40 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
*/
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
- int error = 0;
- bool pme_done = false;
+ int ret = 0;
if (enable && !device_may_wakeup(&dev->dev))
return -EINVAL;
+ /* Don't do the same thing twice in a row for one device. */
+ if (!!enable == !!dev->wakeup_prepared)
+ return 0;
+
/*
* According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
* Anderson we should be doing PME# wake enable followed by ACPI wake
* enable. To disable wake-up we call the platform first, for symmetry.
*/
- if (!enable && platform_pci_can_wakeup(dev))
- error = platform_pci_sleep_wake(dev, false);
-
- if (!enable || pci_pme_capable(dev, state)) {
- pci_pme_active(dev, enable);
- pme_done = true;
- }
+ if (enable) {
+ int error;
- if (enable && platform_pci_can_wakeup(dev))
+ if (pci_pme_capable(dev, state))
+ pci_pme_active(dev, true);
+ else
+ ret = 1;
error = platform_pci_sleep_wake(dev, true);
+ if (ret)
+ ret = error;
+ if (!ret)
+ dev->wakeup_prepared = true;
+ } else {
+ platform_pci_sleep_wake(dev, false);
+ pci_pme_active(dev, false);
+ dev->wakeup_prepared = false;
+ }
- return pme_done ? 0 : error;
+ return ret;
}
/**
@@ -1356,6 +1379,7 @@ void pci_pm_init(struct pci_dev *dev)
int pm;
u16 pmc;
+ dev->wakeup_prepared = false;
dev->pm_cap = 0;
/* find PCI PM capability in list */
@@ -2262,6 +2286,22 @@ int __pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
+ * pci_probe_reset_function - check whether the device can be safely reset
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * Returns 0 if the device function can be reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_probe_reset_function(struct pci_dev *dev)
+{
+ return pci_dev_reset(dev, 1);
+}
+
+/**
* pci_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
@@ -2504,6 +2544,50 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return 0;
}
+/**
+ * pci_set_vga_state - set VGA decode state on device and parents if requested
+ * @dev: the PCI device
+ * @decode: true = enable decoding, false = disable decoding
+ * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
+ * @change_bridge: traverse ancestors and change bridges
+ */
+int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
+
+ WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (decode == true)
+ cmd |= command_bits;
+ else
+ cmd &= ~command_bits;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ if (change_bridge == false)
+ return 0;
+
+ bus = dev->bus;
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &cmd);
+ if (decode == true)
+ cmd |= PCI_BRIDGE_CTL_VGA;
+ else
+ cmd &= ~PCI_BRIDGE_CTL_VGA;
+ pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
+ cmd);
+ }
+ bus = bus->parent;
+ }
+ return 0;
+}
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
@@ -2639,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
-static int __devinit pci_init(void)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- pci_fixup_device(pci_fixup_final, dev);
- }
-
- return 0;
-}
-
static int __init pci_setup(char *str)
{
while (str) {
@@ -2672,6 +2745,10 @@ static int __init pci_setup(char *str)
strlen(str + 19));
} else if (!strncmp(str, "ecrc=", 5)) {
pcie_ecrc_get_policy(str + 5);
+ } else if (!strncmp(str, "hpiosize=", 9)) {
+ pci_hotplug_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "hpmemsize=", 10)) {
+ pci_hotplug_mem_size = memparse(str + 10, &str);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
@@ -2683,8 +2760,6 @@ static int __init pci_setup(char *str)
}
early_param("pci", pci_setup);
-device_initcall(pci_init);
-
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
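
Two of the additions above are directly visible outside pci.c: pci_set_vga_state() and the hpiosize=/hpmemsize= options. For the latter, something like pci=hpiosize=4K,hpmemsize=16M on the kernel command line overrides the defaults via memparse(). For the former, a sketch of a hypothetical graphics driver using the helper; only pci_set_vga_state() and its parameters come from the patch, the foo_* wrapper is illustrative.

#include <linux/pci.h>

static int foo_enable_vga_decode(struct pci_dev *pdev)
{
	/*
	 * Route legacy VGA I/O and memory cycles to this device and
	 * set PCI_BRIDGE_CTL_VGA on every bridge above it.
	 */
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 true);
}
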
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5ff4d25bf0e9..d92d1954a2fb 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -16,6 +16,7 @@ extern void pci_cleanup_rom(struct pci_dev *dev);
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
struct vm_area_struct *vma);
#endif
+int pci_probe_reset_function(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@@ -133,7 +134,6 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
-extern int pcie_mch_quirk;
extern struct device_attribute pci_dev_attrs[];
extern struct device_attribute dev_attr_cpuaffinity;
extern struct device_attribute dev_attr_cpulistaffinity;
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index d92ae21a59d8..62d15f652bb6 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -22,11 +22,10 @@
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/fs.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "aerdrv.h"
-struct aer_error_inj
-{
+struct aer_error_inj {
u8 bus;
u8 dev;
u8 fn;
@@ -38,8 +37,7 @@ struct aer_error_inj
u32 header_log3;
};
-struct aer_error
-{
+struct aer_error {
struct list_head list;
unsigned int bus;
unsigned int devfn;
@@ -55,8 +53,7 @@ struct aer_error
u32 source_id;
};
-struct pci_bus_ops
-{
+struct pci_bus_ops {
struct list_head list;
struct pci_bus *bus;
struct pci_ops *ops;
@@ -150,7 +147,7 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
target = &err->header_log1;
break;
case PCI_ERR_HEADER_LOG+8:
- target = &err->header_log2;
+ target = &err->header_log2;
break;
case PCI_ERR_HEADER_LOG+12:
target = &err->header_log3;
@@ -258,8 +255,7 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
bus_ops = NULL;
out:
spin_unlock_irqrestore(&inject_lock, flags);
- if (bus_ops)
- kfree(bus_ops);
+ kfree(bus_ops);
return 0;
}
@@ -401,10 +397,8 @@ static int aer_inject(struct aer_error_inj *einj)
else
ret = -EINVAL;
out_put:
- if (err_alloc)
- kfree(err_alloc);
- if (rperr_alloc)
- kfree(rperr_alloc);
+ kfree(err_alloc);
+ kfree(rperr_alloc);
pci_dev_put(dev);
return ret;
}
@@ -458,8 +452,7 @@ static void __exit aer_inject_exit(void)
}
spin_lock_irqsave(&inject_lock, flags);
- list_for_each_entry_safe(err, err_next,
- &pci_bus_ops_list, list) {
+ list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) {
list_del(&err->list);
kfree(err);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 4770f13b3ca1..40c3cc5d1caf 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
@@ -38,7 +39,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static int __devinit aer_probe (struct pcie_device *dev);
+static int __devinit aer_probe(struct pcie_device *dev);
static void aer_remove(struct pcie_device *dev);
static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
enum pci_channel_state error);
@@ -47,12 +48,12 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
static struct pci_error_handlers aer_error_handlers = {
.error_detected = aer_error_detected,
- .resume = aer_error_resume,
+ .resume = aer_error_resume,
};
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
- .port_type = PCIE_ANY_PORT,
+ .port_type = PCIE_RC_PORT,
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
@@ -134,12 +135,12 @@ EXPORT_SYMBOL_GPL(aer_irq);
*
* Invoked when Root Port's AER service is loaded.
**/
-static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev)
+static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
{
struct aer_rpc *rpc;
- if (!(rpc = kzalloc(sizeof(struct aer_rpc),
- GFP_KERNEL)))
+ rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
+ if (!rpc)
return NULL;
/*
@@ -189,26 +190,28 @@ static void aer_remove(struct pcie_device *dev)
*
* Invoked when PCI Express bus loads AER service driver.
**/
-static int __devinit aer_probe (struct pcie_device *dev)
+static int __devinit aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
struct device *device = &dev->device;
/* Init */
- if ((status = aer_init(dev)))
+ status = aer_init(dev);
+ if (status)
return status;
/* Alloc rpc data structure */
- if (!(rpc = aer_alloc_rpc(dev))) {
+ rpc = aer_alloc_rpc(dev);
+ if (!rpc) {
dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
aer_remove(dev);
return -ENOMEM;
}
/* Request IRQ ISR */
- if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv",
- dev))) {
+ status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
+ if (status) {
dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
aer_remove(dev);
return status;
@@ -316,6 +319,8 @@ static int __init aer_service_init(void)
{
if (pcie_aer_disable)
return -ENXIO;
+ if (!pci_msi_enabled())
+ return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index bbd7428ca2d0..bd833ea3ba49 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -16,12 +16,9 @@
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
-#define AER_UNCORRECTABLE 4
-#define AER_ERROR_MASK 0x001fffff
-#define AER_ERROR(d) (d & AER_ERROR_MASK)
/* Root Error Status Register Bits */
-#define ROOT_ERR_STATUS_MASKS 0x0f
+#define ROOT_ERR_STATUS_MASKS 0x0f
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
@@ -32,8 +29,6 @@
#define ERR_COR_ID(d) (d & 0xffff)
#define ERR_UNCOR_ID(d) (d >> 16)
-#define AER_SUCCESS 0
-#define AER_UNSUCCESS 1
#define AER_ERROR_SOURCES_MAX 100
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
@@ -43,13 +38,6 @@
PCI_ERR_UNC_UNX_COMP| \
PCI_ERR_UNC_MALF_TLP)
-/* AER Error Info Flags */
-#define AER_TLP_HEADER_VALID_FLAG 0x00000001
-#define AER_MULTI_ERROR_VALID_FLAG 0x00000002
-
-#define ERR_CORRECTABLE_ERROR_MASK 0x000031c1
-#define ERR_UNCORRECTABLE_ERROR_MASK 0x001ff010
-
struct header_log_regs {
unsigned int dw0;
unsigned int dw1;
@@ -61,11 +49,20 @@ struct header_log_regs {
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
- u16 id;
- int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
- int flags;
+
+ unsigned int id:16;
+
+ unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
+ unsigned int __pad1:5;
+ unsigned int multi_error_valid:1;
+
+ unsigned int first_error:5;
+ unsigned int __pad2:2;
+ unsigned int tlp_header_valid:1;
+
unsigned int status; /* COR/UNCOR Error Status */
- struct header_log_regs tlp; /* TLP Header */
+ unsigned int mask; /* COR/UNCOR Error Mask */
+ struct header_log_regs tlp; /* TLP Header */
};
struct aer_err_source {
@@ -125,6 +122,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
@@ -136,4 +134,4 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
}
#endif
-#endif //_AERDRV_H_
+#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 3d8872704a58..9f5ccbeb4fa5 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -49,10 +49,11 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE;
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL,
- reg16);
+ pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -68,10 +69,11 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
PCI_EXP_DEVCTL_NFERE |
PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_URRE);
- pci_write_config_word(dev, pos+PCI_EXP_DEVCTL,
- reg16);
+ pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
+
return 0;
}
+EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
@@ -92,6 +94,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
#if 0
int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
@@ -110,7 +113,6 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
}
#endif /* 0 */
-
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
@@ -164,8 +166,9 @@ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
e_info->dev[e_info->error_dev_num] = dev;
e_info->error_dev_num++;
return 1;
- } else
- return 0;
+ }
+
+ return 0;
}
@@ -193,7 +196,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* If there is no multiple error, we stop
* or continue based on the id comparing.
*/
- if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ if (!e_info->multi_error_valid)
return result;
/*
@@ -233,24 +236,16 @@ static int find_device_iter(struct pci_dev *dev, void *data)
status = 0;
mask = 0;
if (e_info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev,
- pos + PCI_ERR_COR_STATUS,
- &status);
- pci_read_config_dword(dev,
- pos + PCI_ERR_COR_MASK,
- &mask);
- if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
+ if (status & ~mask) {
add_error_device(e_info, dev);
goto added;
}
} else {
- pci_read_config_dword(dev,
- pos + PCI_ERR_UNCOR_STATUS,
- &status);
- pci_read_config_dword(dev,
- pos + PCI_ERR_UNCOR_MASK,
- &mask);
- if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
+ if (status & ~mask) {
add_error_device(e_info, dev);
goto added;
}
@@ -259,7 +254,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
return 0;
added:
- if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ if (e_info->multi_error_valid)
return 0;
else
return 1;
@@ -411,8 +406,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
pci_cleanup_aer_uncorrect_error_status(dev);
dev->error_state = pci_channel_io_normal;
}
- }
- else {
+ } else {
/*
* If the error is reported by an end point, we think this
* error is related to the upstream link of the end point.
@@ -473,7 +467,7 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
udev = dev;
else
- udev= dev->bus->self;
+ udev = dev->bus->self;
data.is_downstream = 0;
data.aer_driver = NULL;
@@ -576,7 +570,7 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pcie_device * aerdev,
+static void handle_error_source(struct pcie_device *aerdev,
struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -682,7 +676,7 @@ static void disable_root_aer(struct aer_rpc *rpc)
*
* Invoked by DPC handler to consume an error.
*/
-static struct aer_err_source* get_e_source(struct aer_rpc *rpc)
+static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
{
struct aer_err_source *e_source;
unsigned long flags;
@@ -702,32 +696,50 @@ static struct aer_err_source* get_e_source(struct aer_rpc *rpc)
return e_source;
}
+/**
+ * get_device_error_info - read error status from dev and store it in info
+ * @dev: pointer to the device expected to have an error record
+ * @info: pointer to structure to store the error record
+ *
+ * Return 1 on success, 0 on error.
+ */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
- int pos;
+ int pos, temp;
+
+ info->status = 0;
+ info->tlp_header_valid = 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
/* The device might not support AER */
if (!pos)
- return AER_SUCCESS;
+ return 1;
if (info->severity == AER_CORRECTABLE) {
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
&info->status);
- if (!(info->status & ERR_CORRECTABLE_ERROR_MASK))
- return AER_UNSUCCESS;
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
+ &info->mask);
+ if (!(info->status & ~info->mask))
+ return 0;
} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
&info->status);
- if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK))
- return AER_UNSUCCESS;
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
+ &info->mask);
+ if (!(info->status & ~info->mask))
+ return 0;
+
+ /* Get First Error Pointer */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
+ info->first_error = PCI_ERR_CAP_FEP(temp);
if (info->status & AER_LOG_TLP_MASKS) {
- info->flags |= AER_TLP_HEADER_VALID_FLAG;
+ info->tlp_header_valid = 1;
pci_read_config_dword(dev,
pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
pci_read_config_dword(dev,
@@ -739,7 +751,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
}
}
- return AER_SUCCESS;
+ return 1;
}
static inline void aer_process_err_devices(struct pcie_device *p_device,
@@ -753,14 +765,14 @@ static inline void aer_process_err_devices(struct pcie_device *p_device,
e_info->id);
}
+	/* Report all errors before handling them, so records aren't lost to a reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (get_device_error_info(e_info->dev[i], e_info) ==
- AER_SUCCESS) {
+ if (get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
- handle_error_source(p_device,
- e_info->dev[i],
- e_info);
- }
+ }
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info))
+ handle_error_source(p_device, e_info->dev[i], e_info);
}
}
@@ -806,7 +818,9 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+ e_info->multi_error_valid = 1;
+
+ aer_print_port_info(p_device->port, e_info);
find_source_device(p_device->port, e_info);
aer_process_err_devices(p_device, e_info);
@@ -863,10 +877,5 @@ int aer_init(struct pcie_device *dev)
if (aer_osc_setup(dev) && !forceload)
return -ENXIO;
- return AER_SUCCESS;
+ return 0;
}
-
-EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
-EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
-EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
-
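
With pci_enable_pcie_error_reporting() and its companions now exported next to their definitions, an endpoint driver can opt in to AER from its probe path. A minimal sketch, assuming the usual <linux/aer.h> declarations and a hypothetical foo driver (not part of this patch):

#include <linux/pci.h>
#include <linux/aer.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;

	/* Best effort: not every device exposes the AER capability */
	if (pci_enable_pcie_error_reporting(pdev))
		dev_info(&pdev->dev, "AER reporting not enabled\n");

	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
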
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 0fc29ae80df8..44acde72294f 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -27,69 +27,70 @@
#define AER_AGENT_COMPLETER 2
#define AER_AGENT_TRANSMITTER 3
-#define AER_AGENT_REQUESTER_MASK (PCI_ERR_UNC_COMP_TIME| \
- PCI_ERR_UNC_UNSUP)
-
-#define AER_AGENT_COMPLETER_MASK PCI_ERR_UNC_COMP_ABORT
-
-#define AER_AGENT_TRANSMITTER_MASK(t, e) (e & (PCI_ERR_COR_REP_ROLL| \
- ((t == AER_CORRECTABLE) ? PCI_ERR_COR_REP_TIMER: 0)))
+#define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
+#define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ 0 : PCI_ERR_UNC_COMP_ABORT)
+#define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
#define AER_GET_AGENT(t, e) \
- ((e & AER_AGENT_COMPLETER_MASK) ? AER_AGENT_COMPLETER : \
- (e & AER_AGENT_REQUESTER_MASK) ? AER_AGENT_REQUESTER : \
- (AER_AGENT_TRANSMITTER_MASK(t, e)) ? AER_AGENT_TRANSMITTER : \
+ ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
+ (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
+ (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
AER_AGENT_RECEIVER)
-#define AER_PHYSICAL_LAYER_ERROR_MASK PCI_ERR_COR_RCVR
-#define AER_DATA_LINK_LAYER_ERROR_MASK(t, e) \
- (PCI_ERR_UNC_DLP| \
- PCI_ERR_COR_BAD_TLP| \
- PCI_ERR_COR_BAD_DLLP| \
- PCI_ERR_COR_REP_ROLL| \
- ((t == AER_CORRECTABLE) ? \
- PCI_ERR_COR_REP_TIMER: 0))
-
#define AER_PHYSICAL_LAYER_ERROR 0
#define AER_DATA_LINK_LAYER_ERROR 1
#define AER_TRANSACTION_LAYER_ERROR 2
-#define AER_GET_LAYER_ERROR(t, e) \
- ((e & AER_PHYSICAL_LAYER_ERROR_MASK) ? \
- AER_PHYSICAL_LAYER_ERROR : \
- (e & AER_DATA_LINK_LAYER_ERROR_MASK(t, e)) ? \
- AER_DATA_LINK_LAYER_ERROR : \
- AER_TRANSACTION_LAYER_ERROR)
+#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
+ PCI_ERR_COR_RCVR : 0)
+#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
+ (PCI_ERR_COR_BAD_TLP| \
+ PCI_ERR_COR_BAD_DLLP| \
+ PCI_ERR_COR_REP_ROLL| \
+ PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
+
+#define AER_GET_LAYER_ERROR(t, e) \
+ ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
+ (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
+ AER_TRANSACTION_LAYER_ERROR)
+
+#define AER_PR(info, pdev, fmt, args...) \
+ printk("%s%s %s: " fmt, (info->severity == AER_CORRECTABLE) ? \
+ KERN_WARNING : KERN_ERR, dev_driver_string(&pdev->dev), \
+ dev_name(&pdev->dev), ## args)
/*
* AER error strings
*/
-static char* aer_error_severity_string[] = {
+static char *aer_error_severity_string[] = {
"Uncorrected (Non-Fatal)",
"Uncorrected (Fatal)",
"Corrected"
};
-static char* aer_error_layer[] = {
+static char *aer_error_layer[] = {
"Physical Layer",
"Data Link Layer",
"Transaction Layer"
};
-static char* aer_correctable_error_string[] = {
- "Receiver Error ", /* Bit Position 0 */
+static char *aer_correctable_error_string[] = {
+ "Receiver Error ", /* Bit Position 0 */
NULL,
NULL,
NULL,
NULL,
NULL,
- "Bad TLP ", /* Bit Position 6 */
- "Bad DLLP ", /* Bit Position 7 */
- "RELAY_NUM Rollover ", /* Bit Position 8 */
+ "Bad TLP ", /* Bit Position 6 */
+ "Bad DLLP ", /* Bit Position 7 */
+ "RELAY_NUM Rollover ", /* Bit Position 8 */
NULL,
NULL,
NULL,
- "Replay Timer Timeout ", /* Bit Position 12 */
- "Advisory Non-Fatal ", /* Bit Position 13 */
+ "Replay Timer Timeout ", /* Bit Position 12 */
+ "Advisory Non-Fatal ", /* Bit Position 13 */
NULL,
NULL,
NULL,
@@ -110,7 +111,7 @@ static char* aer_correctable_error_string[] = {
NULL,
};
-static char* aer_uncorrectable_error_string[] = {
+static char *aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
@@ -123,10 +124,10 @@ static char* aer_uncorrectable_error_string[] = {
NULL,
NULL,
NULL,
- "Poisoned TLP ", /* Bit Position 12 */
+ "Poisoned TLP ", /* Bit Position 12 */
"Flow Control Protocol ", /* Bit Position 13 */
- "Completion Timeout ", /* Bit Position 14 */
- "Completer Abort ", /* Bit Position 15 */
+ "Completion Timeout ", /* Bit Position 14 */
+ "Completer Abort ", /* Bit Position 15 */
"Unexpected Completion ", /* Bit Position 16 */
"Receiver Overflow ", /* Bit Position 17 */
"Malformed TLP ", /* Bit Position 18 */
@@ -145,98 +146,69 @@ static char* aer_uncorrectable_error_string[] = {
NULL,
};
-static char* aer_agent_string[] = {
+static char *aer_agent_string[] = {
"Receiver ID",
"Requester ID",
"Completer ID",
"Transmitter ID"
};
-static char * aer_get_error_source_name(int severity,
- unsigned int status,
- char errmsg_buff[])
+static void __aer_print_error(struct aer_err_info *info, struct pci_dev *dev)
{
- int i;
- char * errmsg = NULL;
+ int i, status;
+ char *errmsg = NULL;
+
+ status = (info->status & ~info->mask);
for (i = 0; i < 32; i++) {
if (!(status & (1 << i)))
continue;
- if (severity == AER_CORRECTABLE)
+ if (info->severity == AER_CORRECTABLE)
errmsg = aer_correctable_error_string[i];
else
errmsg = aer_uncorrectable_error_string[i];
- if (!errmsg) {
- sprintf(errmsg_buff, "Unknown Error Bit %2d ", i);
- errmsg = errmsg_buff;
- }
-
- break;
+ if (errmsg)
+ AER_PR(info, dev, " [%2d] %s%s\n", i, errmsg,
+ info->first_error == i ? " (First)" : "");
+ else
+ AER_PR(info, dev, " [%2d] Unknown Error Bit%s\n", i,
+ info->first_error == i ? " (First)" : "");
}
-
- return errmsg;
}
-static DEFINE_SPINLOCK(logbuf_lock);
-static char errmsg_buff[100];
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
- char * errmsg;
- int err_layer, agent;
- char * loglevel;
-
- if (info->severity == AER_CORRECTABLE)
- loglevel = KERN_WARNING;
- else
- loglevel = KERN_ERR;
-
- printk("%s+------ PCI-Express Device Error ------+\n", loglevel);
- printk("%sError Severity\t\t: %s\n", loglevel,
- aer_error_severity_string[info->severity]);
-
- if ( info->status == 0) {
- printk("%sPCIE Bus Error type\t: (Unaccessible)\n", loglevel);
- printk("%sUnaccessible Received\t: %s\n", loglevel,
- info->flags & AER_MULTI_ERROR_VALID_FLAG ?
- "Multiple" : "First");
- printk("%sUnregistered Agent ID\t: %04x\n", loglevel,
- (dev->bus->number << 8) | dev->devfn);
+ int id = ((dev->bus->number << 8) | dev->devfn);
+
+ if (info->status == 0) {
+ AER_PR(info, dev,
+ "PCIE Bus Error: severity=%s, type=Unaccessible, "
+ "id=%04x(Unregistered Agent ID)\n",
+ aer_error_severity_string[info->severity], id);
} else {
- err_layer = AER_GET_LAYER_ERROR(info->severity, info->status);
- printk("%sPCIE Bus Error type\t: %s\n", loglevel,
- aer_error_layer[err_layer]);
-
- spin_lock(&logbuf_lock);
- errmsg = aer_get_error_source_name(info->severity,
- info->status,
- errmsg_buff);
- printk("%s%s\t: %s\n", loglevel, errmsg,
- info->flags & AER_MULTI_ERROR_VALID_FLAG ?
- "Multiple" : "First");
- spin_unlock(&logbuf_lock);
+ int layer, agent;
+ layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- printk("%s%s\t\t: %04x\n", loglevel,
- aer_agent_string[agent],
- (dev->bus->number << 8) | dev->devfn);
-
- printk("%sVendorID=%04xh, DeviceID=%04xh,"
- " Bus=%02xh, Device=%02xh, Function=%02xh\n",
- loglevel,
- dev->vendor,
- dev->device,
- dev->bus->number,
- PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn));
-
- if (info->flags & AER_TLP_HEADER_VALID_FLAG) {
+
+ AER_PR(info, dev,
+ "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ aer_error_severity_string[info->severity],
+ aer_error_layer[layer], id, aer_agent_string[agent]);
+
+ AER_PR(info, dev,
+ " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device, info->status, info->mask);
+
+ __aer_print_error(info, dev);
+
+ if (info->tlp_header_valid) {
unsigned char *tlp = (unsigned char *) &info->tlp;
- printk("%sTLP Header:\n", loglevel);
- printk("%s%02x%02x%02x%02x %02x%02x%02x%02x"
+ AER_PR(info, dev, " TLP Header:"
+ " %02x%02x%02x%02x %02x%02x%02x%02x"
" %02x%02x%02x%02x %02x%02x%02x%02x\n",
- loglevel,
*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp,
*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4),
*(tlp + 11), *(tlp + 10), *(tlp + 9),
@@ -244,5 +216,15 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
*(tlp + 13), *(tlp + 12));
}
}
+
+ if (info->id && info->error_dev_num > 1 && info->id == id)
+ AER_PR(info, dev,
+ " Error of this Agent(%04x) is reported first\n", id);
}
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
+{
+ dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity], info->id);
+}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 3d27c97e0486..745402e8e498 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,6 +26,13 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
+/* Note: those are not register definitions */
+#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+#define ASPM_STATE_L1 (4) /* L1 state */
+#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
struct aspm_latency {
u32 l0s; /* L0s latency (nsec) */
u32 l1; /* L1 latency (nsec) */
@@ -40,17 +47,20 @@ struct pcie_link_state {
struct list_head link; /* node in parent's children list */
/* ASPM state */
- u32 aspm_support:2; /* Supported ASPM state */
- u32 aspm_enabled:2; /* Enabled ASPM state */
- u32 aspm_default:2; /* Default ASPM state by BIOS */
+ u32 aspm_support:3; /* Supported ASPM state */
+ u32 aspm_enabled:3; /* Enabled ASPM state */
+ u32 aspm_capable:3; /* Capable ASPM state with latency */
+ u32 aspm_default:3; /* Default ASPM state by BIOS */
+ u32 aspm_disable:3; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
- /* Latencies */
- struct aspm_latency latency; /* Exit latency */
+ /* Exit latencies */
+ struct aspm_latency latency_up; /* Upstream direction exit latency */
+ struct aspm_latency latency_dw; /* Downstream direction exit latency */
/*
* Endpoint acceptable latencies. A pcie downstream port only
* has one slot under it, so at most there are 8 functions.
@@ -82,7 +92,7 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return ASPM_STATE_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -164,18 +174,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
-{
- struct pci_dev *child;
- struct pci_bus *linkbus = link->pdev->subordinate;
-
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
- return true;
- }
- return false;
-}
-
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -288,71 +286,130 @@ static u32 calc_l1_acceptable(u32 encoding)
return (1000 << encoding);
}
-static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- u32 *l0s, u32 *l1, u32 *enabled)
+struct aspm_register_info {
+ u32 support:2;
+ u32 enabled:2;
+ u32 latency_encoding_l0s;
+ u32 latency_encoding_l1;
+};
+
+static void pcie_get_aspm_reg(struct pci_dev *pdev,
+ struct aspm_register_info *info)
{
int pos;
u16 reg16;
- u32 reg32, encoding;
+ u32 reg32;
- *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
- *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
- *state = 0;
- if (*state == 0)
+ info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
+ info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
+}
+
+static void pcie_aspm_check_latency(struct pci_dev *endpoint)
+{
+ u32 latency, l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+	/* A device that is not in D0 doesn't need a latency check */
+ if ((endpoint->current_state != PCI_D0) &&
+ (endpoint->current_state != PCI_UNKNOWN))
return;
- encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_l0s_latency(encoding);
- if (*state & PCIE_LINK_STATE_L1) {
- encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_l1_latency(encoding);
+ link = endpoint->bus->self->link_state;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link) {
+ /* Check upstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ (link->latency_up.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+
+ /* Check downstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ (link->latency_dw.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ /*
+ * Check L1 latency.
+	 * Every switch on the path to the root complex needs 1
+	 * more microsecond for L1. The spec doesn't mention L0s.
+ */
+ latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
+ if ((link->aspm_capable & ASPM_STATE_L1) &&
+ (latency + l1_switch_latency > acceptable->l1))
+ link->aspm_capable &= ~ASPM_STATE_L1;
+ l1_switch_latency += 1000;
+
+ link = link->parent;
}
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- u32 support, l0s, l1, enabled;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
+ struct aspm_register_info upreg, dwreg;
if (blacklist) {
- /* Set support state to 0, so we will disable ASPM later */
- link->aspm_support = 0;
- link->aspm_default = 0;
- link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ /* Set enabled/disable so that we will disable ASPM later */
+ link->aspm_enabled = ASPM_STATE_ALL;
+ link->aspm_disable = ASPM_STATE_ALL;
return;
}
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
- /* upstream component states */
- pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
- link->aspm_support = support;
- link->latency.l0s = l0s;
- link->latency.l1 = l1;
- link->aspm_enabled = enabled;
-
- /* downstream component states, all functions have the same setting */
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
- pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
- link->aspm_support &= support;
- link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
- link->latency.l1 = max_t(u32, link->latency.l1, l1);
+ pcie_get_aspm_reg(child, &dwreg);
- if (!link->aspm_support)
- return;
-
- link->aspm_enabled &= link->aspm_support;
+ /*
+ * Setup L0s state
+ *
+ * Note that we must not enable L0s in either direction on a
+ * given link unless components on both sides of the link each
+ * support L0s.
+ */
+ if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
+ link->aspm_support |= ASPM_STATE_L0S;
+ if (dwreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ if (upreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
+ link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
+
+ /* Setup L1 state */
+ if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
+ link->aspm_support |= ASPM_STATE_L1;
+ if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
+ link->aspm_enabled |= ASPM_STATE_L1;
+ link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
+ link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+
+ /* Save default state */
link->aspm_default = link->aspm_enabled;
- /* ENDPOINT states*/
+ /* Setup initial capable state. Will be updated later */
+ link->aspm_capable = link->aspm_support;
+ /*
+	 * If the downstream component has a PCI bridge function, don't
+	 * do ASPM for now.
+ */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+ link->aspm_disable = ASPM_STATE_ALL;
+ break;
+ }
+ }
+
+ /* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
u32 reg32, encoding;
@@ -365,109 +422,46 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
pos = pci_find_capability(child, PCI_CAP_ID_EXP);
pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ /* Calculate endpoint L0s acceptable latency */
encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
acceptable->l0s = calc_l0s_acceptable(encoding);
- if (link->aspm_support & PCIE_LINK_STATE_L1) {
- encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- acceptable->l1 = calc_l1_acceptable(encoding);
- }
- }
-}
-
-/**
- * __pcie_aspm_check_state_one - check latency for endpoint device.
- * @endpoint: pointer to the struct pci_dev of endpoint device
- *
- * TBD: The latency from the endpoint to root complex vary per switch's
- * upstream link state above the device. Here we just do a simple check
- * which assumes all links above the device can be in L1 state, that
- * is we just consider the worst case. If switch's upstream link can't
- * be put into L0S/L1, then our check is too strictly.
- */
-static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
-{
- u32 l1_switch_latency = 0;
- struct aspm_latency *acceptable;
- struct pcie_link_state *link;
-
- link = endpoint->bus->self->link_state;
- state &= link->aspm_support;
- acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+ /* Calculate endpoint L1 acceptable latency */
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
- while (link && state) {
- if ((state & PCIE_LINK_STATE_L0S) &&
- (link->latency.l0s > acceptable->l0s))
- state &= ~PCIE_LINK_STATE_L0S;
- if ((state & PCIE_LINK_STATE_L1) &&
- (link->latency.l1 + l1_switch_latency > acceptable->l1))
- state &= ~PCIE_LINK_STATE_L1;
- link = link->parent;
- /*
- * Every switch on the path to root complex need 1
- * more microsecond for L1. Spec doesn't mention L0s.
- */
- l1_switch_latency += 1000;
- }
- return state;
-}
-
-static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
-{
- pci_power_t power_state;
- struct pci_dev *child;
- struct pci_bus *linkbus = link->pdev->subordinate;
-
- /* If no child, ignore the link */
- if (list_empty(&linkbus->devices))
- return state;
-
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- */
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
-
- if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child->pcie_type != PCI_EXP_TYPE_LEG_END))
- continue;
- /* Device not in D0 doesn't need check latency */
- power_state = child->current_state;
- if (power_state == PCI_D1 || power_state == PCI_D2 ||
- power_state == PCI_D3hot || power_state == PCI_D3cold)
- continue;
- state = __pcie_aspm_check_state_one(child, state);
+ pcie_aspm_check_latency(child);
}
- return state;
}
-static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
+static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
u16 reg16;
int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~0x3;
- reg16 |= state;
+ reg16 |= val;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
+static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
+ u32 upstream = 0, dwstream = 0;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
- /* If no child, disable the link */
- if (list_empty(&linkbus->devices))
- state = 0;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return;
+ /* Nothing to do if the link is already in the requested state */
+ state &= (link->aspm_capable & ~link->aspm_disable);
+ if (link->aspm_enabled == state)
+ return;
+ /* Convert ASPM state to upstream/downstream ASPM register state */
+ if (state & ASPM_STATE_L0S_UP)
+ dwstream |= PCIE_LINK_STATE_L0S;
+ if (state & ASPM_STATE_L0S_DW)
+ upstream |= PCIE_LINK_STATE_L0S;
+ if (state & ASPM_STATE_L1) {
+ upstream |= PCIE_LINK_STATE_L1;
+ dwstream |= PCIE_LINK_STATE_L1;
}
/*
* Spec 2.0 suggests all functions should be configured the
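
calc_l0s_latency(), calc_l1_latency() and the calc_*_acceptable() helpers used in the hunks above are also outside this diff. They decode the 3-bit latency encodings defined by the PCIe spec; the following is a self-contained sketch of that decode, with values in nanoseconds. Rounding the "greater than" encodings up to a concrete worst case is an assumption here.

#include <stdint.h>
typedef uint32_t u32;

/* Link Capabilities exit-latency encodings: 0 = <64ns, 1 = 64-128ns, ...,
 * 6 = 2-4us, 7 = more than 4us (L0s) or more than 64us (L1). */
u32 calc_l0s_latency(u32 encoding)
{
	if (encoding == 0x7)
		return 5 * 1000;	/* "> 4us": assume 5us worst case */
	return 64 << encoding;		/* 64ns .. 4us */
}

u32 calc_l1_latency(u32 encoding)
{
	if (encoding == 0x7)
		return 65 * 1000;	/* "> 64us": assume 65us worst case */
	return 1000 << encoding;	/* 1us .. 64us */
}

/* Device Capabilities acceptable-latency encodings; 7 means "no limit". */
u32 calc_l0s_acceptable(u32 encoding)
{
	return (encoding == 0x7) ? (u32)-1 : 64 << encoding;
}

u32 calc_l1_acceptable(u32 encoding)
{
	return (encoding == 0x7) ? (u32)-1 : 1000 << encoding;
}
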
@@ -475,67 +469,24 @@ static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
* upstream component first and then downstream, and vice
* versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
- if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(parent, state);
-
+ if (state & ASPM_STATE_L1)
+ pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
- __pcie_aspm_config_one_dev(child, state);
-
- if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(parent, state);
+ pcie_config_aspm_dev(child, dwstream);
+ if (!(state & ASPM_STATE_L1))
+ pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
}
-/* Check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
- u32 state)
+static void pcie_config_aspm_path(struct pcie_link_state *link)
{
- struct pcie_link_state *leaf, *root = link->root;
-
- state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
-
- /* Check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibling) {
- if (!list_empty(&leaf->children) || (leaf->root != root))
- continue;
- state = pcie_aspm_check_state(leaf, state);
- }
- /* Check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root, state);
- if (link->aspm_enabled == state)
- return;
- /*
- * We must change the hierarchy. See comments in
- * __pcie_aspm_config_link for the order
- **/
- if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibling) {
- if (leaf->root == root)
- __pcie_aspm_config_link(leaf, state);
- }
- } else {
- list_for_each_entry_reverse(leaf, &link_list, sibling) {
- if (leaf->root == root)
- __pcie_aspm_config_link(leaf, state);
- }
+ while (link) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ link = link->parent;
}
}
-/*
- * pcie_aspm_configure_link_state: enable/disable PCI express link state
- * @pdev: the root port or switch downstream port
- */
-static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
- u32 state)
-{
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(link, state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
static void free_link_state(struct pcie_link_state *link)
{
link->pdev->link_state = NULL;
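
pcie_aspm_check_latency(), called from the endpoint loop above, replaces the removed __pcie_aspm_check_state_one(). The stand-alone model below reconstructs the worst-case check it is expected to perform from the removed code and the new per-direction latency fields; the struct names and fields are simplified assumptions, not the kernel's types.

#include <stdint.h>
typedef uint32_t u32;

#define ASPM_STATE_L0S_UP	1
#define ASPM_STATE_L0S_DW	2
#define ASPM_STATE_L1		4

struct model_latency { u32 l0s, l1; };		/* exit latencies, ns */

struct model_link {
	struct model_link *parent;		/* link one level closer to the root */
	u32 aspm_capable;			/* ASPM_STATE_* bits still allowed */
	struct model_latency up, dw;		/* per-direction exit latencies */
};

/* Walk from the endpoint's link to the root, pruning any state whose exit
 * latency exceeds what the endpoint can accept.  Each switch crossed adds
 * roughly 1us to the effective L1 exit latency; L0s has no such penalty. */
void check_latency(struct model_link *link, u32 acceptable_l0s, u32 acceptable_l1)
{
	u32 l1_switch_latency = 0;

	while (link) {
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    link->up.l0s > acceptable_l0s)
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    link->dw.l0s > acceptable_l0s)
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;

		u32 l1 = link->up.l1 > link->dw.l1 ? link->up.l1 : link->dw.l1;
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    l1 + l1_switch_latency > acceptable_l1)
			link->aspm_capable &= ~ASPM_STATE_L1;

		l1_switch_latency += 1000;
		link = link->parent;
	}
}
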
@@ -570,10 +521,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
return 0;
}
-static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
struct pcie_link_state *link;
- int blacklist = !!pcie_aspm_sanity_check(pdev);
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
@@ -599,15 +549,7 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
link->root = link->parent->root;
list_add(&link->sibling, &link_list);
-
pdev->link_state = link;
-
- /* Check ASPM capability */
- pcie_aspm_cap_init(link, blacklist);
-
- /* Check Clock PM capability */
- pcie_clkpm_cap_init(link, blacklist);
-
return link;
}
@@ -618,8 +560,8 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- u32 state;
struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
@@ -637,47 +579,64 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
goto out;
mutex_lock(&aspm_lock);
- link = pcie_aspm_setup_link_state(pdev);
+ link = alloc_pcie_link_state(pdev);
if (!link)
goto unlock;
/*
- * Setup initial ASPM state
- *
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. But we must
- * make sure BIOS doesn't set unsupported link state.
+	 * Set up the initial ASPM state. Note that we must also
+	 * configure upstream links, because their capable state can be
+	 * updated by pcie_aspm_cap_init().
*/
- if (pcie_aspm_downstream_has_switch(link)) {
- state = pcie_aspm_check_state(link, link->aspm_default);
- __pcie_aspm_config_link(link, state);
- } else {
- state = policy_to_aspm_state(link);
- __pcie_aspm_configure_link_state(link, state);
- }
+ pcie_aspm_cap_init(link, blacklist);
+ pcie_config_aspm_path(link);
/* Setup initial Clock PM state */
- state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
- pcie_set_clkpm(link, state);
+ pcie_clkpm_cap_init(link, blacklist);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
}
+/* Recheck latencies and update aspm_capable for links under the root */
+static void pcie_update_aspm_capable(struct pcie_link_state *root)
+{
+ struct pcie_link_state *link;
+ BUG_ON(root->parent);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ link->aspm_capable = link->aspm_support;
+ }
+ list_for_each_entry(link, &link_list, sibling) {
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
+ if (link->root != root)
+ continue;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) &&
+ (child->pcie_type != PCI_EXP_TYPE_LEG_END))
+ continue;
+ pcie_aspm_check_latency(child);
+ }
+ }
+}
+
/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state = parent->link_state;
+ struct pcie_link_state *link, *root, *parent_link;
- if (aspm_disabled || !pdev->is_pcie || !parent || !link_state)
+ if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state)
return;
- if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
return;
+
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
-
/*
	 * All PCIe functions are in one slot; removing one function removes
	 * the whole slot, so just wait until we are the last function left.
@@ -685,13 +644,20 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
goto out;
+ link = parent->link_state;
+ root = link->root;
+ parent_link = link->parent;
+
/* All functions are removed, so just disable ASPM for the link */
- __pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibling);
- list_del(&link_state->link);
+ pcie_config_aspm_link(link, 0);
+ list_del(&link->sibling);
+ list_del(&link->link);
/* Clock PM is for endpoint device */
+ free_link_state(link);
- free_link_state(link_state);
+ /* Recheck latencies and configure upstream links */
+ pcie_update_aspm_capable(root);
+ pcie_config_aspm_path(parent_link);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -700,18 +666,23 @@ out:
/* @pdev: the root port or switch downstream port */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pcie_link_state *link = pdev->link_state;
- if (aspm_disabled || !pdev->is_pcie || !pdev->link_state)
+ if (aspm_disabled || !pdev->is_pcie || !link)
return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
return;
/*
- * devices changed PM state, we should recheck if latency meets all
- * functions' requirement
+	 * Devices changed PM state; recheck whether the latency still
+	 * meets all functions' requirements.
*/
- pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_update_aspm_capable(link->root);
+ pcie_config_aspm_path(link);
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
}
/*
@@ -721,7 +692,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie)
return;
@@ -733,12 +704,16 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link_state = parent->link_state;
- link_state->aspm_support &= ~state;
- __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ link = parent->link_state;
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_disable |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= ASPM_STATE_L1;
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
if (state & PCIE_LINK_STATE_CLKPM) {
- link_state->clkpm_capable = 0;
- pcie_set_clkpm(link_state, 0);
+ link->clkpm_capable = 0;
+ pcie_set_clkpm(link, 0);
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -748,7 +723,7 @@ EXPORT_SYMBOL(pci_disable_link_state);
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pcie_link_state *link_state;
+ struct pcie_link_state *link;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
@@ -761,10 +736,9 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibling) {
- __pcie_aspm_configure_link_state(link_state,
- policy_to_aspm_state(link_state));
- pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
+ list_for_each_entry(link, &link_list, sibling) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -802,18 +776,28 @@ static ssize_t link_state_store(struct device *dev,
size_t n)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int state;
+ struct pcie_link_state *link, *root = pdev->link_state->root;
+ u32 val = buf[0] - '0', state = 0;
- if (n < 1)
+ if (n < 1 || val > 3)
return -EINVAL;
- state = buf[0]-'0';
- if (state >= 0 && state <= 3) {
- /* setup link aspm state */
- pcie_aspm_configure_link_state(pdev->link_state, state);
- return n;
- }
- return -EINVAL;
+ /* Convert requested state to ASPM state */
+ if (val & PCIE_LINK_STATE_L0S)
+ state |= ASPM_STATE_L0S;
+ if (val & PCIE_LINK_STATE_L1)
+ state |= ASPM_STATE_L1;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ pcie_config_aspm_link(link, state);
+ }
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+ return n;
}
static ssize_t clk_ctl_show(struct device *dev,
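
policy_to_aspm_state() and policy_to_clkpm_state(), used throughout the hunks above, map the global aspm_policy module parameter onto a target state mask and are not part of this diff. Assuming the usual default/performance/powersave policies, the mapping is presumably along these lines; this is only a sketch, and pcie_config_aspm_link() still masks the result with aspm_capable and ~aspm_disable.

#include <stdint.h>
typedef uint32_t u32;

#define ASPM_STATE_ALL	0x7		/* L0s (both directions) + L1, as sketched earlier */

enum { POLICY_DEFAULT, POLICY_PERFORMANCE, POLICY_POWERSAVE };

/* Sketch: translate the policy into the state the link should aim for. */
u32 policy_to_aspm_state_sketch(int aspm_policy, u32 aspm_default)
{
	switch (aspm_policy) {
	case POLICY_PERFORMANCE:
		return 0;		/* disable ASPM entirely */
	case POLICY_POWERSAVE:
		return ASPM_STATE_ALL;	/* enable everything the link supports */
	case POLICY_DEFAULT:
	default:
		return aspm_default;	/* keep whatever firmware left enabled */
	}
}
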
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 13ffdc35ea0e..52f84fca9f7d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -187,14 +187,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
*/
static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
{
- struct pcie_port_data *port_data = pci_get_drvdata(dev);
int irq, interrupt_mode = PCIE_PORT_NO_IRQ;
int i;
- /* Check MSI quirk */
- if (port_data->port_type == PCIE_RC_PORT && pcie_mch_quirk)
- goto Fallback;
-
/* Try to use MSI-X if supported */
if (!pcie_port_enable_msix(dev, vectors, mask))
return PCIE_PORT_MSIX_MODE;
@@ -203,7 +198,6 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
if (!pci_enable_msi(dev))
interrupt_mode = PCIE_PORT_MSI_MODE;
- Fallback:
if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
interrupt_mode = PCIE_PORT_INTx_MODE;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 091ce70051e0..f635e476d632 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -30,7 +30,6 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* global data */
-static const char device_name[] = "pcieport-driver";
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
@@ -205,6 +204,7 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
/* If fatal, restore cfg space for possible link reset at upstream */
if (dev->error_state == pci_channel_io_frozen) {
+ dev->state_saved = true;
pci_restore_state(dev);
pcie_portdrv_restore_config(dev);
pci_enable_pcie_error_reporting(dev);
@@ -261,7 +261,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = {
};
static struct pci_driver pcie_portdriver = {
- .name = (char *)device_name,
+ .name = "pcieport",
.id_table = &port_pci_ids[0],
.probe = pcie_portdrv_probe,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 40e75f6a5056..8105e32117f6 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -235,7 +235,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->start = l64;
res->end = l64 + sz64;
dev_printk(KERN_DEBUG, &dev->dev,
- "reg %x 64bit mmio: %pR\n", pos, res);
+ "reg %x %s: %pR\n", pos,
+ (res->flags & IORESOURCE_PREFETCH) ?
+ "64bit mmio pref" : "64bit mmio",
+ res);
}
res->flags |= IORESOURCE_MEM_64;
@@ -249,7 +252,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->end = l + sz;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
- (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
+ (res->flags & IORESOURCE_IO) ? "io port" :
+ ((res->flags & IORESOURCE_PREFETCH) ?
+ "32bit mmio pref" : "32bit mmio"),
res);
}
@@ -692,6 +697,23 @@ static void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}
+static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+ u32 reg32;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ if (!(reg16 & PCI_EXP_FLAGS_SLOT))
+ return;
+ pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
+ if (reg32 & PCI_EXP_SLTCAP_HPC)
+ pdev->is_hotplug_bridge = 1;
+}
+
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
@@ -799,6 +821,7 @@ int pci_setup_device(struct pci_dev *dev)
pci_read_irq(dev);
dev->transparent = ((dev->class & 0xff) == 1);
pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
+ set_pcie_hotplug_bridge(dev);
break;
case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
@@ -1009,6 +1032,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
+ /* Clear the state_saved flag. */
+ dev->state_saved = false;
+
/* Initialize various capabilities */
pci_init_capabilities(dev);
@@ -1061,8 +1087,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (dev && !dev->is_added) /* new device? */
nr++;
- if ((dev && dev->multifunction) ||
- (!dev && pcibios_scan_all_fns(bus, devfn))) {
+ if (dev && dev->multifunction) {
for (fn = 1; fn < 8; fn++) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
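
set_pcie_hotplug_bridge() above flags a port whose Slot Capabilities register advertises Hot-Plug Capable. For reference, the same check can be made from user space against a device's config file under /sys/bus/pci/devices/; the program below is only an illustration, not kernel code (reading config space past offset 0x40 normally requires root).

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define PCI_STATUS		0x06
#define PCI_STATUS_CAP_LIST	0x10
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_EXP		0x10
#define PCI_EXP_FLAGS		2
#define PCI_EXP_FLAGS_SLOT	0x0100
#define PCI_EXP_SLTCAP		20
#define PCI_EXP_SLTCAP_HPC	0x0040

/* Return 1 if the config space in 'buf' describes a hotplug-capable slot. */
static int is_hotplug_bridge(const uint8_t *buf)
{
	if (!(buf[PCI_STATUS] & PCI_STATUS_CAP_LIST))
		return 0;
	/* Walk the capability list looking for the PCI Express capability. */
	for (unsigned pos = buf[PCI_CAPABILITY_LIST] & ~3u, ttl = 48;
	     pos >= 0x40 && pos <= 0xe0 && ttl; pos = buf[pos + 1] & ~3u, ttl--) {
		uint16_t flags;
		uint32_t sltcap;

		if (buf[pos] != PCI_CAP_ID_EXP)
			continue;
		flags = buf[pos + PCI_EXP_FLAGS] | (buf[pos + PCI_EXP_FLAGS + 1] << 8);
		if (!(flags & PCI_EXP_FLAGS_SLOT))
			return 0;
		sltcap = buf[pos + PCI_EXP_SLTCAP] |
			 ((uint32_t)buf[pos + PCI_EXP_SLTCAP + 1] << 8) |
			 ((uint32_t)buf[pos + PCI_EXP_SLTCAP + 2] << 16) |
			 ((uint32_t)buf[pos + PCI_EXP_SLTCAP + 3] << 24);
		return !!(sltcap & PCI_EXP_SLTCAP_HPC);
	}
	return 0;
}

int main(int argc, char **argv)
{
	uint8_t buf[256] = { 0 };
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* e.g. /sys/bus/pci/devices/0000:00:1c.0/config */
	if (fd < 0 || pread(fd, buf, sizeof(buf), 0) < 64)
		return 1;
	close(fd);
	printf("%s\n", is_hotplug_bridge(buf) ? "hotplug bridge" : "not a hotplug bridge");
	return 0;
}
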
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 85ce23997be4..a790b1771f9f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -31,8 +31,6 @@ int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
-int pcie_mch_quirk;
-EXPORT_SYMBOL(pcie_mch_quirk);
#ifdef CONFIG_PCI_QUIRKS
/*
@@ -672,6 +670,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
+/*
+ * The TI XIO2000a PCIe-PCI bridge erroneously reports that it supports fast
+ * back-to-back transfers; disable fast back-to-back on the secondary bus segment.
+ */
+static void __devinit quirk_xio2000a(struct pci_dev *dev)
+{
+ struct pci_dev *pdev;
+ u16 command;
+
+ dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
+ "secondary bus fast back-to-back transfers disabled\n");
+ list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (command & PCI_COMMAND_FAST_BACK)
+ pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
+ quirk_xio2000a);
#ifdef CONFIG_X86_IO_APIC
@@ -1203,6 +1220,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
+ case 0x00ba: /* Compaq Evo D510 USDT */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs and on-board VGA
* controller is disabled if an AGP card is
@@ -1501,7 +1519,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_a
static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
{
- pcie_mch_quirk = 1;
+ pci_msi_off(pdev);
+ pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
@@ -1569,10 +1588,8 @@ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
return;
dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
-
- printk(KERN_INFO "PCI quirk: reroute interrupts for 0x%04x:0x%04x\n",
- dev->vendor, dev->device);
- return;
+ dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
@@ -1614,8 +1631,8 @@ static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
- printk(KERN_INFO "disabled boot interrupt on device 0x%04x:0x%04x\n",
- dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
@@ -1647,8 +1664,8 @@ static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
- printk(KERN_INFO "disabled boot interrupts on PCI device"
- "0x%04x:0x%04x\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
@@ -1678,8 +1695,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
pci_config_dword &= ~AMD_813X_NOIOAMODE;
pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
- printk(KERN_INFO "disabled boot interrupts on PCI device "
- "0x%04x:0x%04x\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
@@ -1695,14 +1712,13 @@ static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
if (!pci_config_word) {
- printk(KERN_INFO "boot interrupts on PCI device 0x%04x:0x%04x "
- "already disabled\n",
- dev->vendor, dev->device);
+ dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] "
+ "already disabled\n", dev->vendor, dev->device);
return;
}
pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
- printk(KERN_INFO "disabled boot interrupts on PCI device "
- "0x%04x:0x%04x\n", dev->vendor, dev->device);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
@@ -2384,8 +2400,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
@@ -2494,6 +2512,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
@@ -2572,6 +2591,19 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
pci_do_fixups(dev, start, end);
}
+
+static int __init pci_apply_final_quirks(void)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ pci_fixup_device(pci_fixup_final, dev);
+ }
+
+ return 0;
+}
+
+fs_initcall_sync(pci_apply_final_quirks);
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
#endif
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index e8cb5051c311..ec415352d9ba 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -113,37 +113,6 @@ pci_find_next_bus(const struct pci_bus *from)
return b;
}
-#ifdef CONFIG_PCI_LEGACY
-/**
- * pci_find_device - begin or continue searching for a PCI device by vendor/device id
- * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
- * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
- * @from: Previous PCI device found in search, or %NULL for new search.
- *
- * Iterates through the list of known PCI devices. If a PCI device is found
- * with a matching @vendor and @device, a pointer to its device structure is
- * returned. Otherwise, %NULL is returned.
- * A new search is initiated by passing %NULL as the @from argument.
- * Otherwise if @from is not %NULL, searches continue from next device
- * on the global list.
- *
- * NOTE: Do not use this function any more; use pci_get_device() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
- struct pci_dev *from)
-{
- struct pci_dev *pdev;
-
- pci_dev_get(from);
- pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
- pci_dev_put(pdev);
- return pdev;
-}
-EXPORT_SYMBOL(pci_find_device);
-#endif /* CONFIG_PCI_LEGACY */
-
/**
* pci_get_slot - locate PCI device for a given PCI slot
* @bus: PCI bus on which desired PCI device resides
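
pci_find_device() is being removed above because it hands back a pointer without holding a reference, so the device can disappear underneath the caller. In-kernel users are expected to switch to the reference-counted pci_get_device(); a typical replacement loop, shown purely as a usage sketch:

#include <linux/pci.h>

/* pci_get_device() drops the reference on the device passed in as @from and
 * returns the next matching device with a new reference held, so a caller
 * that breaks out of the loop early must call pci_dev_put() itself. */
static void walk_all_pci_devices(void)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
		dev_info(&dev->dev, "found [%04x:%04x]\n", dev->vendor, dev->device);
}
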
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7c443b4583ab..0959430534b2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -299,8 +299,17 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
r = bus->resource[i];
if (r == &ioport_resource || r == &iomem_resource)
continue;
- if (r && (r->flags & type_mask) == type && !r->parent)
- return r;
+ if (r && (r->flags & type_mask) == type) {
+ if (!r->parent)
+ return r;
+			/*
+			 * If there is no child under it, release the resource
+			 * and reuse it. No need to reset it; pbus_size_* will
+			 * set it again.
+			 */
+ if (!r->child && !release_resource(r))
+ return r;
+ }
}
return NULL;
}
@@ -309,7 +318,7 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
since these windows have 4K granularity and the IO ranges
of non-bridge PCI devices are limited to 256 bytes.
We must be careful with the ISA aliasing though. */
-static void pbus_size_io(struct pci_bus *bus)
+static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
@@ -336,6 +345,8 @@ static void pbus_size_io(struct pci_bus *bus)
size1 += r_size;
}
}
+ if (size < min_size)
+ size = min_size;
/* To be fixed in 2.5: we should have sort of HAVE_ISA
flag in the struct pci_bus. */
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
@@ -354,7 +365,8 @@ static void pbus_size_io(struct pci_bus *bus)
/* Calculate the size of the bus and minimal alignment which
guarantees that all child resources fit in this size. */
-static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type)
+static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ unsigned long type, resource_size_t min_size)
{
struct pci_dev *dev;
resource_size_t min_align, align, size;
@@ -404,6 +416,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
+ if (size < min_size)
+ size = min_size;
align = 0;
min_align = 0;
@@ -483,6 +497,7 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
{
struct pci_dev *dev;
unsigned long mask, prefmask;
+ resource_size_t min_mem_size = 0, min_io_size = 0;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -512,8 +527,12 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
case PCI_CLASS_BRIDGE_PCI:
pci_bridge_check_ranges(bus);
+ if (bus->self->is_hotplug_bridge) {
+ min_io_size = pci_hotplug_io_size;
+ min_mem_size = pci_hotplug_mem_size;
+ }
default:
- pbus_size_io(bus);
+ pbus_size_io(bus, min_io_size);
/* If the bridge supports prefetchable range, size it
separately. If it doesn't, or its prefetchable window
has already been allocated by arch code, try
@@ -521,9 +540,11 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus)
resources. */
mask = IORESOURCE_MEM;
prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pbus_size_mem(bus, prefmask, prefmask))
+ if (pbus_size_mem(bus, prefmask, prefmask, min_mem_size))
mask = prefmask; /* Success, size non-prefetch only. */
- pbus_size_mem(bus, mask, IORESOURCE_MEM);
+ else
+ min_mem_size += min_mem_size;
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, min_mem_size);
break;
}
}
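
pci_hotplug_io_size and pci_hotplug_mem_size, consulted above for hotplug-capable bridges, are defined elsewhere in this series rather than in this hunk. The sketch below shows how they are presumably declared; the default values (256 bytes of I/O, 2 MB of memory) and the pci=hpiosize=/hpmemsize= boot overrides are assumptions, not taken from this diff.

#include <linux/types.h>

/* Minimum window sizes reserved under a hotplug bridge so a device that is
 * hot-added later has some room; assumed overridable at boot time, e.g.
 * pci=hpiosize=4K,hpmemsize=16M */
#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2 * 1024 * 1024)

resource_size_t pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
resource_size_t pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
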
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 88cdd1a937d6..c54526b206b5 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return err;
}
+EXPORT_SYMBOL(pci_claim_resource);
#ifdef CONFIG_PCI_QUIRKS
void pci_disable_bridge_window(struct pci_dev *dev)
@@ -204,43 +205,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
-#if 0
-int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
-{
- struct pci_bus *bus = dev->bus;
- struct resource *res = dev->resource + resno;
- unsigned int type_mask;
- int i, ret = -EBUSY;
-
- type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
-
- for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
- struct resource *r = bus->resource[i];
- if (!r)
- continue;
-
- /* type_mask must match */
- if ((res->flags ^ r->flags) & type_mask)
- continue;
-
- ret = request_resource(r, res);
-
- if (ret == 0)
- break;
- }
-
- if (ret) {
- dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
- } else if (resno < PCI_BRIDGE_RESOURCES) {
- pci_update_resource(dev, resno);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pci_assign_resource_fixed);
-#endif
-
/* Sort resources by alignment */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{