path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_processor.c | 7
-rw-r--r--  drivers/acpi/acpica/exfield.c | 104
-rw-r--r--  drivers/acpi/bus.c | 5
-rw-r--r--  drivers/acpi/ec.c | 21
-rw-r--r--  drivers/ata/Kconfig | 5
-rw-r--r--  drivers/ata/ahci.c | 35
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/libata-core.c | 27
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 7
-rw-r--r--  drivers/ata/pata_at91.c | 11
-rw-r--r--  drivers/ata/pata_samsung_cf.c | 10
-rw-r--r--  drivers/base/dd.c | 17
-rw-r--r--  drivers/base/platform.c | 7
-rw-r--r--  drivers/block/floppy.c | 11
-rw-r--r--  drivers/bluetooth/ath3k.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 5
-rw-r--r--  drivers/clk/tegra/clk-tegra124.c | 3
-rw-r--r--  drivers/clk/versatile/clk-vexpress-osc.c | 4
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 6
-rw-r--r--  drivers/clocksource/exynos_mct.c | 12
-rw-r--r--  drivers/clocksource/zevio-timer.c | 7
-rw-r--r--  drivers/connector/cn_proc.c | 2
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 6
-rw-r--r--  drivers/cpufreq/longhaul.c | 36
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 23
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 4
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 1
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c | 5
-rw-r--r--  drivers/cpufreq/unicore2-cpufreq.c | 4
-rw-r--r--  drivers/dma/Kconfig | 2
-rw-r--r--  drivers/dma/edma.c | 6
-rw-r--r--  drivers/dma/fsl-edma.c | 12
-rw-r--r--  drivers/dma/sirf-dma.c | 2
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 12
-rw-r--r--  drivers/gpio/gpiolib.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 21
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 84
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 51
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 22
-rw-r--r--  drivers/hwmon/coretemp.c | 4
-rw-r--r--  drivers/hwmon/ltc2945.c | 6
-rw-r--r--  drivers/hwmon/vexpress.c | 83
-rw-r--r--  drivers/idle/intel_idle.c | 3
-rw-r--r--  drivers/iio/adc/Kconfig | 4
-rw-r--r--  drivers/iio/adc/at91_adc.c | 33
-rw-r--r--  drivers/iio/adc/exynos_adc.c | 6
-rw-r--r--  drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 7
-rw-r--r--  drivers/iio/industrialio-buffer.c | 6
-rw-r--r--  drivers/iio/light/cm32181.c | 1
-rw-r--r--  drivers/iio/light/cm36651.c | 22
-rw-r--r--  drivers/infiniband/hw/cxgb4/Kconfig | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 39
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 13
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 14
-rw-r--r--  drivers/input/misc/da9055_onkey.c | 1
-rw-r--r--  drivers/input/misc/soc_button_array.c | 1
-rw-r--r--  drivers/input/mouse/elantech.c | 1
-rw-r--r--  drivers/input/mouse/synaptics.c | 97
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 15
-rw-r--r--  drivers/input/serio/i8042.c | 6
-rw-r--r--  drivers/input/serio/serio.c | 14
-rw-r--r--  drivers/input/tablet/wacom_sys.c | 246
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 29
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 2
-rw-r--r--  drivers/iommu/arm-smmu.c | 4
-rw-r--r--  drivers/iommu/dmar.c | 3
-rw-r--r--  drivers/iommu/intel-iommu.c | 10
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 54
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 8
-rw-r--r--  drivers/md/dm-cache-target.c | 1
-rw-r--r--  drivers/md/dm-thin.c | 77
-rw-r--r--  drivers/md/dm-verity.c | 15
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/mtd/ubi/block.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 2
-rw-r--r--  drivers/net/can/c_can/Kconfig | 7
-rw-r--r--  drivers/net/can/c_can/c_can.c | 660
-rw-r--r--  drivers/net/can/c_can/c_can.h | 23
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c | 9
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 2
-rw-r--r--  drivers/net/can/dev.c | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000_isa.c | 16
-rw-r--r--  drivers/net/can/slcan.c | 6
-rw-r--r--  drivers/net/ethernet/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 8
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 179
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.h | 3
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 6
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 77
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 2
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 61
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 58
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/ec_bhf.c | 706
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 223
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 5
-rw-r--r--  drivers/net/ethernet/jme.c | 53
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 66
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 9
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c | 22
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c | 7
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h | 3
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c | 13
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 31
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 4
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 17
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/macvlan.c | 3
-rw-r--r--  drivers/net/macvtap.c | 9
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 11
-rw-r--r--  drivers/net/slip/slip.c | 6
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 57
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 28
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug_sta.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 14
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/chip.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 22
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8188ee/trx.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 6
-rw-r--r--  drivers/of/irq.c | 28
-rw-r--r--  drivers/of/platform.c | 4
-rw-r--r--  drivers/of/selftest.c | 32
-rw-r--r--  drivers/of/testcase-data/tests-interrupts.dtsi | 13
-rw-r--r--  drivers/phy/Kconfig | 1
-rw-r--r--  drivers/phy/Makefile | 9
-rw-r--r--  drivers/phy/phy-core.c | 3
-rw-r--r--  drivers/pinctrl/pinctrl-as3722.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 13
-rw-r--r--  drivers/pinctrl/pinctrl-tb10x.c | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7790.c | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 2
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 44
-rw-r--r--  drivers/pnp/quirks.c | 79
-rw-r--r--  drivers/power/reset/vexpress-poweroff.c | 19
-rw-r--r--  drivers/ptp/Kconfig | 3
-rw-r--r--  drivers/regulator/pbias-regulator.c | 76
-rw-r--r--  drivers/s390/cio/chsc.c | 22
-rw-r--r--  drivers/scsi/hpsa.c | 8
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r--  drivers/scsi/scsi_error.c | 12
-rw-r--r--  drivers/scsi/scsi_lib.c | 6
-rw-r--r--  drivers/scsi/scsi_netlink.c | 2
-rw-r--r--  drivers/scsi/virtio_scsi.c | 6
-rw-r--r--  drivers/spi/spi-atmel.c | 3
-rw-r--r--  drivers/spi/spi-bfin5xx.c | 1
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 4
-rw-r--r--  drivers/spi/spi-sirf.c | 20
-rw-r--r--  drivers/staging/comedi/drivers/usbdux.c | 9
-rw-r--r--  drivers/staging/iio/adc/mxs-lradc.c | 2
-rw-r--r--  drivers/staging/iio/resolver/ad2s1200.c | 4
-rw-r--r--  drivers/tty/hvc/hvc_console.c | 2
-rw-r--r--  drivers/tty/n_tty.c | 4
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 4
-rw-r--r--  drivers/tty/serial/8250/8250_dma.c | 9
-rw-r--r--  drivers/tty/serial/samsung.c | 23
-rw-r--r--  drivers/tty/serial/serial_core.c | 39
-rw-r--r--  drivers/tty/tty_buffer.c | 17
-rw-r--r--  drivers/usb/chipidea/core.c | 37
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/dwc3/gadget.c | 12
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 10
-rw-r--r--  drivers/usb/gadget/f_fs.c | 7
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 2
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 6
-rw-r--r--  drivers/usb/gadget/inode.c | 1
-rw-r--r--  drivers/usb/gadget/rndis.c | 1
-rw-r--r--  drivers/usb/gadget/u_ether.c | 101
-rw-r--r--  drivers/usb/gadget/zero.c | 2
-rw-r--r--  drivers/usb/host/ehci-fsl.c | 3
-rw-r--r--  drivers/usb/host/ohci-hub.c | 18
-rw-r--r--  drivers/usb/host/ohci-pci.c | 1
-rw-r--r--  drivers/usb/host/ohci.h | 2
-rw-r--r--  drivers/usb/host/xhci-pci.c | 6
-rw-r--r--  drivers/usb/host/xhci-ring.c | 67
-rw-r--r--  drivers/usb/host/xhci.c | 7
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 5
-rw-r--r--  drivers/usb/musb/omap2430.c | 8
-rw-r--r--  drivers/usb/phy/phy-am335x-control.c | 9
-rw-r--r--  drivers/usb/phy/phy-fsm-usb.c | 9
-rw-r--r--  drivers/usb/phy/phy.c | 3
-rw-r--r--  drivers/usb/serial/io_ti.c | 50
-rw-r--r--  drivers/usb/serial/option.c | 81
-rw-r--r--  drivers/usb/serial/qcserial.c | 24
-rw-r--r--  drivers/usb/serial/usb-serial.c | 4
-rw-r--r--  drivers/usb/storage/shuttle_usbat.c | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 14
-rw-r--r--  drivers/usb/wusbcore/mmc.c | 2
-rw-r--r--  drivers/usb/wusbcore/wa-xfer.c | 4
-rw-r--r--  drivers/uwb/drp.c | 9
250 files changed, 3801 insertions, 1684 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c29c2c3ec0ad..b06f5f55ada9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
+ if (pr->apic_id == -1)
+ return -ENODEV;
+
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0) {
+ if (apic_id < 0)
acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- return -ENODEV;
- }
pr->apic_id = apic_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 68d97441432c..12878e1982f7 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -45,10 +45,71 @@
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
+#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exfield")
+/* Local prototypes */
+static u32
+acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_serial_access_bytes
+ *
+ * PARAMETERS: accessor_type - The type of the protocol indicated by region
+ * field access attributes
+ * access_length - The access length of the region field
+ *
+ * RETURN: Decoded access length
+ *
+ * DESCRIPTION: This routine returns the length of the generic_serial_bus
+ * protocol bytes
+ *
+ ******************************************************************************/
+
+static u32
+acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
+{
+ u32 length;
+
+ switch (accessor_type) {
+ case AML_FIELD_ATTRIB_QUICK:
+
+ length = 0;
+ break;
+
+ case AML_FIELD_ATTRIB_SEND_RCV:
+ case AML_FIELD_ATTRIB_BYTE:
+
+ length = 1;
+ break;
+
+ case AML_FIELD_ATTRIB_WORD:
+ case AML_FIELD_ATTRIB_WORD_CALL:
+
+ length = 2;
+ break;
+
+ case AML_FIELD_ATTRIB_MULTIBYTE:
+ case AML_FIELD_ATTRIB_RAW_BYTES:
+ case AML_FIELD_ATTRIB_RAW_PROCESS:
+
+ length = access_length;
+ break;
+
+ case AML_FIELD_ATTRIB_BLOCK:
+ case AML_FIELD_ATTRIB_BLOCK_CALL:
+ default:
+
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ break;
+ }
+
+ return (length);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_ex_read_data_from_field
@@ -63,8 +124,9 @@ ACPI_MODULE_NAME("exfield")
* Buffer, depending on the size of the field.
*
******************************************************************************/
+
acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
+acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
union acpi_operand_object *obj_desc,
union acpi_operand_object **ret_buffer_desc)
{
@@ -73,6 +135,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
acpi_size length;
void *buffer;
u32 function;
+ u16 accessor_type;
ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
@@ -116,9 +179,22 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
ACPI_READ | (obj_desc->field.attribute << 16);
} else if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_GSBUS) {
- length = ACPI_GSBUS_BUFFER_SIZE;
- function =
- ACPI_READ | (obj_desc->field.attribute << 16);
+ accessor_type = obj_desc->field.attribute;
+ length = acpi_ex_get_serial_access_length(accessor_type,
+ obj_desc->
+ field.
+ access_length);
+
+ /*
+ * Add additional 2 bytes for modeled generic_serial_bus data buffer:
+ * typedef struct {
+ * BYTEStatus; // Byte 0 of the data buffer
+ * BYTELength; // Byte 1 of the data buffer
+ * BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
+ * }
+ */
+ length += 2;
+ function = ACPI_READ | (accessor_type << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -231,6 +307,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
void *buffer;
union acpi_operand_object *buffer_desc;
u32 function;
+ u16 accessor_type;
ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
@@ -284,9 +361,22 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
ACPI_WRITE | (obj_desc->field.attribute << 16);
} else if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_GSBUS) {
- length = ACPI_GSBUS_BUFFER_SIZE;
- function =
- ACPI_WRITE | (obj_desc->field.attribute << 16);
+ accessor_type = obj_desc->field.attribute;
+ length = acpi_ex_get_serial_access_length(accessor_type,
+ obj_desc->
+ field.
+ access_length);
+
+ /*
+ * Add additional 2 bytes for modeled generic_serial_bus data buffer:
+ * typedef struct {
+ * BYTEStatus; // Byte 0 of the data buffer
+ * BYTELength; // Byte 1 of the data buffer
+ * BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
+ * }
+ */
+ length += 2;
+ function = ACPI_WRITE | (accessor_type << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index e7e5844c87d0..cf925c4f36b7 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -380,9 +380,8 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break;
default:
- acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
- ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
- goto err;
+ acpi_handle_debug(handle, "Unknown event type 0x%x\n", type);
+ break;
}
adev = acpi_bus_get_acpi_device(handle);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d7d32c28829b..ad11ba4a412d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -206,13 +206,13 @@ unlock:
spin_unlock_irqrestore(&ec->lock, flags);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec);
+ return acpi_ec_sync_query(ec, NULL);
}
return 0;
}
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
EXPORT_SYMBOL(ec_get_handle);
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
/*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
* Run with locked ec mutex.
*/
static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
u8 value = 0;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_query_unlocked(ec, &value);
+ status = acpi_ec_sync_query(ec, &value);
if (status || !value)
break;
}
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
kfree(handler);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int status;
struct acpi_ec_query_handler *handler, *copy;
- if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (data)
+ *data = value;
+ if (status)
return status;
+
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
if (!ec)
return;
mutex_lock(&ec->mutex);
- acpi_ec_sync_query(ec);
+ acpi_ec_sync_query(ec, NULL);
mutex_unlock(&ec->mutex);
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 20e03a7eb8b4..c2706047337f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -116,7 +116,7 @@ config AHCI_ST
config AHCI_IMX
tristate "Freescale i.MX AHCI SATA support"
- depends on MFD_SYSCON
+ depends on MFD_SYSCON && (ARCH_MXC || COMPILE_TEST)
help
This option enables support for the Freescale i.MX SoC's
onboard AHCI SATA.
@@ -134,8 +134,7 @@ config AHCI_SUNXI
config AHCI_XGENE
tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
- depends on ARM64 || COMPILE_TEST
- select PHY_XGENE
+ depends on PHY_XGENE
help
This option enables support for APM X-Gene SoC SATA host controller.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5a0bf8ed649b..71e15b73513d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1164,9 +1164,9 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
#endif
static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv)
+ struct ahci_host_priv *hpriv)
{
- int nvec;
+ int rc, nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
goto intx;
@@ -1183,12 +1183,19 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
if (nvec < n_ports)
goto single_msi;
- nvec = pci_enable_msi_range(pdev, nvec, nvec);
- if (nvec == -ENOSPC)
+ rc = pci_enable_msi_exact(pdev, nvec);
+ if (rc == -ENOSPC)
goto single_msi;
- else if (nvec < 0)
+ else if (rc < 0)
goto intx;
+ /* fallback to single MSI mode if the controller enforced MRSM mode */
+ if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
+ pci_disable_msi(pdev);
+ printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
+ goto single_msi;
+ }
+
return nvec;
single_msi:
@@ -1232,18 +1239,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
return rc;
for (i = 0; i < host->n_ports; i++) {
- const char* desc;
struct ahci_port_priv *pp = host->ports[i]->private_data;
- /* pp is NULL for dummy ports */
- if (pp)
- desc = pp->irq_desc;
- else
- desc = dev_driver_string(host->dev);
+ /* Do not receive interrupts sent by dummy ports */
+ if (!pp) {
+ disable_irq(irq + i);
+ continue;
+ }
- rc = devm_request_threaded_irq(host->dev,
- irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
- desc, host->ports[i]);
+ rc = devm_request_threaded_irq(host->dev, irq + i,
+ ahci_hw_interrupt,
+ ahci_thread_fn, IRQF_SHARED,
+ pp->irq_desc, host->ports[i]);
if (rc)
goto out_free_irqs;
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 51af275b3388..b5eb886da226 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -94,6 +94,7 @@ enum {
/* HOST_CTL bits */
HOST_RESET = (1 << 0), /* reset controller; self-clear */
HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
+ HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
/* HOST_CAP bits */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c19734d96d7e..943cc8b83e59 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4224,8 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M500*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD*", "MU0[1-4]*", ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4792,21 +4794,26 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
- unsigned int i;
+ unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- /* the last tag is reserved for internal command. */
- for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
- if (!test_and_set_bit(i, &ap->qc_allocated)) {
- qc = __ata_qc_from_tag(ap, i);
+ for (i = 0; i < ATA_MAX_QUEUE; i++) {
+ tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+
+ /* the last tag is reserved for internal command. */
+ if (tag == ATA_TAG_INTERNAL)
+ continue;
+
+ if (!test_and_set_bit(tag, &ap->qc_allocated)) {
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = tag;
+ ap->last_tag = tag;
break;
}
-
- if (qc)
- qc->tag = i;
+ }
return qc;
}
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 6fac524c2f50..4edb1a81f63f 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -898,9 +898,12 @@ static int arasan_cf_probe(struct platform_device *pdev)
cf_card_detect(acdev, 0);
- return ata_host_activate(host, acdev->irq, irq_handler, 0,
- &arasan_cf_sht);
+ ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
+ &arasan_cf_sht);
+ if (!ret)
+ return 0;
+ cf_exit(acdev);
free_clk:
clk_put(acdev->clk);
return ret;
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index e9c87274a781..8a66f23af4c4 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -407,12 +407,13 @@ static int pata_at91_probe(struct platform_device *pdev)
host->private_data = info;
- return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
- gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
- irq_flags, &pata_at91_sht);
+ ret = ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+ gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
+ irq_flags, &pata_at91_sht);
+ if (ret)
+ goto err_put;
- if (!ret)
- return 0;
+ return 0;
err_put:
clk_put(info->mck);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index a79566d05666..0610e78c8a2a 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -594,9 +594,13 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- return ata_host_activate(host, info->irq,
- info->irq ? pata_s3c_irq : NULL,
- 0, &pata_s3c_sht);
+ ret = ata_host_activate(host, info->irq,
+ info->irq ? pata_s3c_irq : NULL,
+ 0, &pata_s3c_sht);
+ if (ret)
+ goto stop_clk;
+
+ return 0;
stop_clk:
clk_disable(info->clk);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8986b9f22781..62ec61e8f84a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
* deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
* This functions moves all devices from the pending list to the active
* list and schedules the deferred probe workqueue to process them. It
* should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occured and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
*/
static void driver_deferred_probe_trigger(void)
{
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
* into the active list so they can be retried by the workqueue
*/
mutex_lock(&deferred_probe_mutex);
+ atomic_inc(&deferred_trigger_count);
list_splice_tail_init(&deferred_probe_pending_list,
&deferred_probe_active_list);
mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = 0;
+ int local_trigger_count = atomic_read(&deferred_trigger_count);
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
/* Driver requested deferred probing */
dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
} else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e714709704e4..5b47210889e0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
return -ENXIO;
return dev->archdata.irqs[num];
#else
- struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ struct resource *r;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+ return of_irq_get(dev->dev.of_node, num);
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
#endif
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 8f5565bf34cd..fa9bb742df6e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
int ret;
while (ptr) {
- ret = copy_to_user(param, ptr, sizeof(*ptr));
+ struct floppy_raw_cmd cmd = *ptr;
+ cmd.next = NULL;
+ cmd.kernel_data = NULL;
+ ret = copy_to_user(param, &cmd, sizeof(cmd));
if (ret)
return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
@@ -3121,10 +3124,11 @@ loop:
return -ENOMEM;
*rcmd = ptr;
ret = copy_from_user(ptr, param, sizeof(*ptr));
- if (ret)
- return -EFAULT;
ptr->next = NULL;
ptr->buffer_length = 0;
+ ptr->kernel_data = NULL;
+ if (ret)
+ return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > 33)
/* the command may now also take up the space
@@ -3140,7 +3144,6 @@ loop:
for (i = 0; i < 16; i++)
ptr->reply[i] = 0;
ptr->resultcode = 0;
- ptr->kernel_data = NULL;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
if (ptr->length <= 0)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index be571fef185d..a83b57e57b63 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3004) },
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x04CA, 0x3006) },
+ { USB_DEVICE(0x04CA, 0x3007) },
{ USB_DEVICE(0x04CA, 0x3008) },
{ USB_DEVICE(0x04CA, 0x300b) },
{ USB_DEVICE(0x0930, 0x0219) },
@@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index f338b0c5a8de..a7dfbf9a3afb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
- if (id->driver_info & BTUSB_INTEL) {
- usb_enable_autosuspend(data->udev);
+ if (id->driver_info & BTUSB_INTEL)
hdev->setup = btusb_setup_intel;
- }
/* Interface numbers are hardcoded in the specification */
data->isoc = usb_ifnum_to_if(data->udev, 1);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 166e02f16c8a..cc37c342c4cb 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -764,7 +764,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
- [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
[tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
[tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
@@ -809,7 +808,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_trace] = { .dt_id = TEGRA124_CLK_TRACE, .present = true },
[tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
[tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
- [tegra_clk_ndspeed] = { .dt_id = TEGRA124_CLK_NDSPEED, .present = true },
[tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
[tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
[tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
@@ -952,7 +950,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
[tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
[tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
- [tegra_clk_uarte] = { .dt_id = TEGRA124_CLK_UARTE, .present = true },
};
static struct tegra_devclk devclks[] __initdata = {
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index 2dc8b41a339d..422391242b39 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -100,9 +100,11 @@ void __init vexpress_osc_of_setup(struct device_node *node)
struct clk *clk;
u32 range[2];
+ vexpress_sysreg_of_early_init();
+
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
if (!osc)
- goto error;
+ return;
osc->func = vexpress_config_func_get_by_node(node);
if (!osc->func) {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57e823c44d2a..5163ec13429d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;
static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
/*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_CP15_TIMER) {
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
}
}
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
arch_timer_register();
arch_timer_common_init();
}
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index a6ee6d7cd63f..acf5a329d538 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -416,8 +416,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
evt->set_mode = exynos4_tick_set_mode;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
evt->rating = 450;
- clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
- 0xf, 0x7fffffff);
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@@ -430,9 +428,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
evt->irq);
return -EIO;
}
+ irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
} else {
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
}
+ clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
+ 0xf, 0x7fffffff);
return 0;
}
@@ -450,7 +451,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
struct mct_clock_event_device *mevt;
- unsigned int cpu;
/*
* Grab cpu pointer in each case to avoid spurious
@@ -461,12 +461,6 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
mevt = this_cpu_ptr(&percpu_mct_tick);
exynos4_local_timer_setup(&mevt->evt);
break;
- case CPU_ONLINE:
- cpu = (unsigned long)hcpu;
- if (mct_int_type == MCT_INT_SPI)
- irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
- cpumask_of(cpu));
- break;
case CPU_DYING:
mevt = this_cpu_ptr(&percpu_mct_tick);
exynos4_local_timer_stop(&mevt->evt);
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ca81809d159d..7ce442148c3f 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -212,4 +212,9 @@ error_free:
return ret;
}
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+ BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 148d707a1d43..ccdd4c7e748b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
return;
/* Can only change if privileged. */
- if (!capable(CAP_NET_ADMIN)) {
+ if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
err = EPERM;
goto out;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0e9cce82844b..580503513f0f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -92,11 +92,7 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
- depends on ARCH_HIGHBANK
- select GENERIC_CPUFREQ_CPU0
- select PM_OPP
- select REGULATOR
-
+ depends on ARCH_HIGHBANK && GENERIC_CPUFREQ_CPU0 && REGULATOR
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index d00e5d1abd25..5c4369b5d834 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
* Sets a new clock ratio.
*/
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
unsigned int table_index)
{
unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
/* Safety precautions */
mult = mults[mults_index & 0x1f];
if (mult == -1)
- return;
+ return -EINVAL;
+
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
- return;
+ return -EINVAL;
+
/* Voltage transition before frequency transition? */
if (can_scale_voltage && longhaul_index < table_index)
dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
- cpufreq_freq_transition_begin(policy, &freqs);
-
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
goto retry_loop;
}
}
- /* Report true CPU frequency */
- cpufreq_freq_transition_end(policy, &freqs, 0);
- if (!bm_timeout)
+ if (!bm_timeout) {
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
"idle PCI bus.\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
/*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
+ int retval = 0;
if (!can_scale_voltage)
- longhaul_setstate(policy, table_index);
+ retval = longhaul_setstate(policy, table_index);
else {
/* On test system voltage transitions exceeding single
* step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
while (i != table_index) {
vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
if (vid != current_vid) {
- longhaul_setstate(policy, i);
+ retval = longhaul_setstate(policy, i);
current_vid = vid;
msleep(200);
}
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
else
i--;
}
- longhaul_setstate(policy, table_index);
+ retval = longhaul_setstate(policy, table_index);
}
+
longhaul_index = table_index;
- return 0;
+ return retval;
}
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = longhaul_table[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
longhaul_setstate(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
break;
}
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 49f120e1bc7b..78904e6ca4a0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
- struct cpufreq_freqs freqs;
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
return -EINVAL;
}
- freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
- freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
- cpufreq_freq_transition_begin(policy, &freqs);
-
powernow_k6_set_cpu_multiplier(best_i);
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
return 0;
}
@@ -227,9 +219,20 @@ have_busfreq:
static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
- for (i = 0; i < 8; i++) {
- if (i == max_multiplier)
+
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == max_multiplier) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = clock_ratio[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
}
return 0;
}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index f911645c3f6d..e61e224475ad 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
freqs.new = powernow_table[index].frequency;
- cpufreq_freq_transition_begin(policy, &freqs);
-
/* Now do the magic poking into the MSRs. */
if (have_a0 == 1) /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
if (have_a0 == 1)
local_irq_enable();
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
return 0;
}
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 9edccc63245d..af4968813e76 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -29,6 +29,7 @@
#include <asm/cputhreads.h>
#include <asm/reg.h>
+#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#define POWERNV_MAX_PSTATES 256
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index b7e677be1df0..0af618abebaf 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
+ u64 transition_latency_hz;
np = of_get_cpu_node(cpu, NULL);
if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
for_each_cpu(i, per_cpu(cpu_mask, cpu))
per_cpu(cpu_data, i) = data;
+ transition_latency_hz = 12ULL * NSEC_PER_SEC;
policy->cpuinfo.transition_latency =
- (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
+ do_div(transition_latency_hz, fsl_get_sys_freq());
+
of_node_put(np);
return 0;
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 8d045afa7fb4..6f9dfa80563a 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -60,9 +60,7 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->clk = clk_get(NULL, "MAIN_CLK");
- if (IS_ERR(policy->clk))
- return PTR_ERR(policy->clk);
- return 0;
+ return PTR_ERR_OR_ZERO(policy->clk);
}
static struct cpufreq_driver ucv2_driver = {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ba06d1d2f99e..5c5863842de9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -197,7 +197,7 @@ config AMCC_PPC440SPE_ADMA
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
- depends on MFD_TIMBERDALE || HAS_IOMEM
+ depends on MFD_TIMBERDALE
select DMA_ENGINE
help
Enable support for the Timberdale FPGA DMA engine.
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index cd04eb7b182e..926360c2db6a 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -182,11 +182,13 @@ static void edma_execute(struct edma_chan *echan)
echan->ecc->dummy_slot);
}
- edma_resume(echan->ch_num);
-
if (edesc->processed <= MAX_NR_SG) {
dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
edma_start(echan->ch_num);
+ } else {
+ dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
+ echan->ch_num, edesc->processed);
+ edma_resume(echan->ch_num);
}
/*
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 381e793184ba..b396a7fb53ab 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -968,7 +968,17 @@ static struct platform_driver fsl_edma_driver = {
.remove = fsl_edma_remove,
};
-module_platform_driver(fsl_edma_driver);
+static int __init fsl_edma_init(void)
+{
+ return platform_driver_register(&fsl_edma_driver);
+}
+subsys_initcall(fsl_edma_init);
+
+static void __exit fsl_edma_exit(void)
+{
+ platform_driver_unregister(&fsl_edma_driver);
+}
+module_exit(fsl_edma_exit);
MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index a1bd8298d55f..03f7820fa333 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -666,7 +666,7 @@ static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
struct sirfsoc_dma *sdma = ofdma->of_dma_data;
unsigned int request = dma_spec->args[0];
- if (request > SIRFSOC_DMA_CHANNELS)
+ if (request >= SIRFSOC_DMA_CHANNELS)
return NULL;
return dma_get_slave_channel(&sdma->channels[request].chan);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index bf0f8b476696..401add28933f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -233,7 +233,7 @@ static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
{
struct gpio_chip *chip = acpi_gpio->chip;
- if (!chip->dev || !chip->to_irq)
+ if (!chip->to_irq)
return;
INIT_LIST_HEAD(&acpi_gpio->events);
@@ -253,7 +253,7 @@ static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
struct acpi_gpio_event *event, *ep;
struct gpio_chip *chip = acpi_gpio->chip;
- if (!chip->dev || !chip->to_irq)
+ if (!chip->to_irq)
return;
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
@@ -451,7 +451,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
if (function == ACPI_WRITE)
gpiod_set_raw_value(desc, !!((1 << i) & *value));
else
- *value |= gpiod_get_raw_value(desc) << i;
+ *value |= (u64)gpiod_get_raw_value(desc) << i;
}
out:
@@ -501,6 +501,9 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
acpi_handle handle;
acpi_status status;
+ if (!chip || !chip->dev)
+ return;
+
handle = ACPI_HANDLE(chip->dev);
if (!handle)
return;
@@ -531,6 +534,9 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
acpi_handle handle;
acpi_status status;
+ if (!chip || !chip->dev)
+ return;
+
handle = ACPI_HANDLE(chip->dev);
if (!handle)
return;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 761013f8b82f..f48817d97480 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1387,8 +1387,8 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
{
struct gpio_chip *chip = d->host_data;
- irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
irq_set_chip_data(irq, chip);
+ irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e930d4fe29c7..1ef5ab9c9d51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
plane->crtc = crtc;
plane->fb = crtc->primary->fb;
+ drm_framebuffer_reference(plane->fb);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index c786cd4f457b..2a3ad24276f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
- DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
buffer->size);
return &exynos_gem_obj->base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index eb73e3bf2a0c..4ac438187568 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsi->reg_base) {
+ if (IS_ERR(dsi->reg_base)) {
dev_err(&pdev->dev, "failed to remap io region\n");
- return -EADDRNOTAVAIL;
+ return PTR_ERR(dsi->reg_base);
}
dsi->phy = devm_phy_get(&pdev->dev, "dsim");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 7afead9c3f30..852f2dadaebd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
win_data->enabled = true;
- DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ab5e93c30aa2..62a5c3627b90 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
/* Full ppgtt disabled by default for now due to issues. */
if (full)
- return false; /* HAS_PPGTT(dev) */
+ return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
else
return HAS_ALIASING_PPGTT(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7753249b3a95..f98ba4e6e70b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
- WARN_ONCE(hpd[i] & hotplug_trigger &&
- dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
- "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
- hotplug_trigger, i, hpd[i]);
+ if (hpd[i] & hotplug_trigger &&
+ dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+ * hotplug bits itself. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+ "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+ hotplug_trigger, i, hpd[i]);
+
+ continue;
+ }
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9f5b18d9d885..c77af69c2d8f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -827,6 +827,7 @@ enum punit_power_well {
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
+# define STOP_RING (1 << 8)
#define GEN6_GT_MODE 0x20d0
#define GEN7_GT_MODE 0x7008
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dae976f51d83..69bcc42a0e44 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- PIPE_CONF_CHECK_I(gmch_pfit.control);
- /* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ /*
+ * FIXME: BIOS likes to set up a cloned config with lvds+external
+ * screen. Since we don't yet re-compute the pipe config when moving
+ * just the lvds port away to another pipe the sw tracking won't match.
+ *
+ * Proper atomic modesets with recomputed global state will fix this.
+ * Until then just don't check gmch state for inherited modes.
+ */
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ }
+
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
base.head) {
memset(&crtc->config, 0, sizeof(crtc->config));
+ crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d2a55884ad52..dfa85289f28f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
bool has_dpcd;
@@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
+ /* The VDD bit needs a power domain reference, so if the bit is already
+ * enabled when we boot, grab this reference. */
+ if (edp_have_panel_vdd(intel_dp)) {
+ enum intel_display_power_domain power_domain;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+ }
+
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0542de982260..328b1a70264b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,7 +236,8 @@ struct intel_crtc_config {
* tracked with quirk flags so that fastboot and state checker can act
* accordingly.
*/
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
/* User requested mode, only valid as a starting point to
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b4d44e62f0c7..fce4a0d93c0b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
+ if (intel_fb &&
+ (sizes->fb_width > intel_fb->base.width ||
+ sizes->fb_height > intel_fb->base.height)) {
+ DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
+ drm_framebuffer_unreference(&intel_fb->base);
+ intel_fb = ifbdev->fb = NULL;
+ }
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b0413e190625..157267aa3561 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+ true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
- int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6bc68bdcf433..79fb4cc2137c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
I915_WRITE(HWS_PGA, addr);
}
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = ring->obj;
- int ret = 0;
- u32 head;
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ if (!IS_GEN2(ring->dev)) {
+ I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+ return false;
+ }
+ }
- /* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
- DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
- if (I915_NEED_GFX_HWS(dev))
- intel_ring_setup_status_page(ring);
- else
- ring_setup_phys_status_page(ring);
+ if (!IS_GEN2(ring->dev)) {
+ (void)I915_READ_CTL(ring);
+ I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ }
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ if (!stop_ring(ring)) {
+ /* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_TAIL(ring),
I915_READ_START(ring));
- I915_WRITE_HEAD(ring, 0);
-
- if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ if (!stop_ring(ring)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
}
}
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
+
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 270a6a973438..2b91c4b4d34b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 3e6c0f3ed592..ef9957dbac94 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
- MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ mdp4_kms->blank_cursor_iova);
}
/* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
if (old_bo) {
/* drop our previous reference: */
- msm_gem_put_iova(old_bo, mdp4_kms->id);
- drm_gem_object_unreference_unlocked(old_bo);
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
}
- crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index c740ccd1cc67..8edd531cb621 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
VERB("status=%08x", status);
+ mdp_dispatch_irqs(mdp_kms, status);
+
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp4_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
- mdp_dispatch_irqs(mdp_kms, status);
-
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 272e707c9487..0bb4faa17523 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ if (mdp4_kms->blank_cursor_iova)
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ if (mdp4_kms->blank_cursor_bo)
+ drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
kfree(mdp4_kms);
}
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ mutex_lock(&dev->struct_mutex);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+ dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ mdp4_kms->blank_cursor_bo = NULL;
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ &mdp4_kms->blank_cursor_iova);
+ if (ret) {
+ dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ goto fail;
+ }
+
return kms;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 66a4d31aec80..715520c54cde 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -44,6 +44,10 @@ struct mdp4_kms {
struct clk *lut_clk;
struct mdp_irq error_handler;
+
+ /* empty/blank cursor bo to use when cursor is "disabled" */
+ struct drm_gem_object *blank_cursor_bo;
+ uint32_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 353d494a497f..f2b985bc2adf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
VERB("status=%08x", status);
+ mdp_dispatch_irqs(mdp_kms, status);
+
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
-
- mdp_dispatch_irqs(mdp_kms, status);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 6c6d7d4c9b4e..a752ab83b810 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dma_addr_t paddr;
int ret, size;
- /* only doing ARGB32 since this is what is needed to alpha-blend
- * with video overlays:
- */
sizes->surface_bpp = 32;
- sizes->surface_depth = 32;
+ sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3da8264d3039..bb8026daebc9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
if (iommu_present(&platform_bus_type))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
+ else {
drm_mm_remove_node(msm_obj->vram_node);
+ drm_free_large(msm_obj->pages);
+ }
msm_obj->pages = NULL;
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 15936524f226..bc0119fb6c12 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -209,6 +209,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
{
int ret;
+ radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer;
ret = drm_dp_aux_register_i2c_bus(&radeon_connector->ddc_bus->aux);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 89b4afa5041c..f7e46cf682af 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -597,7 +597,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
tmp = 0xCAFEDEAD;
writel(tmp, ptr);
- r = radeon_ring_lock(rdev, ring, 4);
+ r = radeon_ring_lock(rdev, ring, 5);
if (r) {
DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
return r;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index cbf7e3269f84..9c61b74ef441 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -158,16 +158,18 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
u32 line_time_us, vblank_lines;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
- break;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+ radeon_crtc->hw_mode.clock;
+ vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2);
+ vblank_time_us = vblank_lines * line_time_us;
+ break;
+ }
}
}
@@ -181,14 +183,15 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
struct radeon_crtc *radeon_crtc;
u32 vrefresh = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- vrefresh = radeon_crtc->hw_mode.vrefresh;
- break;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ vrefresh = radeon_crtc->hw_mode.vrefresh;
+ break;
+ }
}
}
-
return vrefresh;
}
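The vblank helpers above reduce to line_time_us = crtc_htotal * 1000 / clock and vblank_time_us = (crtc_vblank_end - crtc_vdisplay + 2 * v_border) * line_time_us, evaluated for the first enabled CRTC. A minimal standalone sketch follows, fed with the standard CEA 1080p60 timing (148.5 MHz pixel clock, htotal 2200, vtotal 1125) as a hypothetical mode; none of these numbers come from the patch.

/* Standalone sketch, not driver code: the vblank-time arithmetic used by
 * r600_dpm_get_vblank_time(), fed with a hypothetical 1080p60 mode. */
#include <stdio.h>

int main(void)
{
	unsigned clock = 148500;           /* pixel clock in kHz */
	unsigned crtc_htotal = 2200;
	unsigned crtc_vdisplay = 1080;
	unsigned crtc_vblank_end = 1125;   /* equals vtotal for this mode */
	unsigned v_border = 0;

	unsigned line_time_us = (crtc_htotal * 1000) / clock;          /* 14 us */
	unsigned vblank_lines = crtc_vblank_end - crtc_vdisplay +
				(v_border * 2);                         /* 45 lines */
	unsigned vblank_time_us = vblank_lines * line_time_us;         /* 630 us */

	printf("line=%u us, vblank=%u lines, vblank time=%u us\n",
	       line_time_us, vblank_lines, vblank_time_us);
	return 0;
}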
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index dedea72f48c4..a9fb0d016d38 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -528,6 +528,13 @@ static bool radeon_atpx_detect(void)
has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
}
+ /* some newer PX laptops mark the dGPU as a non-VGA display device */
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+ }
+
if (has_atpx && vga_count == 2) {
acpi_get_name(radeon_atpx_priv.atpx.handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2f7cbb901fb1..8d99d5ee8014 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -840,6 +840,38 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
}
/**
+ * avivo_get_fb_ref_div - feedback and ref divider calculation
+ *
+ * @nom: numerator
+ * @den: denominator
+ * @post_div: post divider
+ * @fb_div_max: feedback divider maximum
+ * @ref_div_max: reference divider maximum
+ * @fb_div: resulting feedback divider
+ * @ref_div: resulting reference divider
+ *
+ * Calculate feedback and reference divider for a given post divider. Makes
+ * sure we stay within the limits.
+ */
+static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
+ unsigned fb_div_max, unsigned ref_div_max,
+ unsigned *fb_div, unsigned *ref_div)
+{
+ /* limit reference * post divider to a maximum */
+ ref_div_max = min(210 / post_div, ref_div_max);
+
+ /* get matching reference and feedback divider */
+ *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+
+ /* limit fb divider to its maximum */
+ if (*fb_div > fb_div_max) {
+ *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+ *fb_div = fb_div_max;
+ }
+}
+
+/**
 * radeon_compute_pll_avivo - compute PLL parameters
*
* @pll: information about the PLL
@@ -860,6 +892,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
u32 *ref_div_p,
u32 *post_div_p)
{
+ unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
+ freq : freq / 10;
+
unsigned fb_div_min, fb_div_max, fb_div;
unsigned post_div_min, post_div_max, post_div;
unsigned ref_div_min, ref_div_max, ref_div;
@@ -880,14 +915,18 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
ref_div_min = pll->reference_div;
else
ref_div_min = pll->min_ref_div;
- ref_div_max = pll->max_ref_div;
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
+ pll->flags & RADEON_PLL_USE_REF_DIV)
+ ref_div_max = pll->reference_div;
+ else
+ ref_div_max = pll->max_ref_div;
/* determine allowed post divider range */
if (pll->flags & RADEON_PLL_USE_POST_DIV) {
post_div_min = pll->post_div;
post_div_max = pll->post_div;
} else {
- unsigned target_clock = freq / 10;
unsigned vco_min, vco_max;
if (pll->flags & RADEON_PLL_IS_LCD) {
@@ -898,6 +937,11 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
vco_max = pll->pll_out_max;
}
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ vco_min *= 10;
+ vco_max *= 10;
+ }
+
post_div_min = vco_min / target_clock;
if ((target_clock * post_div_min) < vco_min)
++post_div_min;
@@ -912,7 +956,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
}
/* represent the searched ratio as fractional number */
- nom = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ? freq : freq / 10;
+ nom = target_clock;
den = pll->reference_freq;
/* reduce the numbers to a simpler ratio */
@@ -926,7 +970,12 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
diff_best = ~0;
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
- unsigned diff = abs(den - den / post_div * post_div);
+ unsigned diff;
+ avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
+ ref_div_max, &fb_div, &ref_div);
+ diff = abs(target_clock - (pll->reference_freq * fb_div) /
+ (ref_div * post_div));
+
if (diff < diff_best || (diff == diff_best &&
!(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
@@ -936,28 +985,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
}
post_div = post_div_best;
- /* limit reference * post divider to a maximum */
- ref_div_max = min(210 / post_div, ref_div_max);
-
- /* get matching reference and feedback divider */
- ref_div = max(DIV_ROUND_CLOSEST(den, post_div), 1u);
- fb_div = DIV_ROUND_CLOSEST(nom * ref_div * post_div, den);
-
- /* we're almost done, but reference and feedback
- divider might be to large now */
-
- nom = fb_div;
- den = ref_div;
-
- if (fb_div > fb_div_max) {
- ref_div = DIV_ROUND_CLOSEST(den * fb_div_max, nom);
- fb_div = fb_div_max;
- }
-
- if (ref_div > ref_div_max) {
- ref_div = ref_div_max;
- fb_div = DIV_ROUND_CLOSEST(nom * ref_div_max, den);
- }
+ /* get the feedback and reference divider for the optimal value */
+ avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+ &fb_div, &ref_div);
/* reduce the numbers to a simpler ratio once more */
/* this also makes sure that the reference divider is large enough */
@@ -979,7 +1009,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
*post_div_p = post_div;
DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
- freq, *dot_clock_p, *fb_div_p, *frac_fb_div_p,
+ freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
ref_div, post_div);
}
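The new helper is pure integer math, so the clamping order is easy to verify outside the driver. Below is a minimal standalone sketch with made-up clock values in the driver's usual 10 kHz units (27 MHz reference, 108 MHz target, post divider 10); it is not part of the patch.

/* Standalone sketch, not driver code: the divider selection performed by
 * avivo_get_fb_ref_div(), with hypothetical inputs in 10 kHz units. */
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

static void get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
			   unsigned fb_div_max, unsigned ref_div_max,
			   unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = MIN(210 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = MIN(MAX(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* if the feedback divider overflows, scale both dividers back */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

int main(void)
{
	/* hypothetical: 108 MHz target, 27 MHz reference, post divider 10 */
	unsigned nom = 10800, den = 2700, post_div = 10;
	unsigned fb_div, ref_div, out;

	get_fb_ref_div(nom, den, post_div, 1023, 1023, &fb_div, &ref_div);
	out = den * fb_div / (ref_div * post_div);
	printf("fb_div=%u ref_div=%u -> %u x 10 kHz\n", fb_div, ref_div, out);
	/* prints fb_div=840 ref_div=21 -> 10800 x 10 kHz (108 MHz) */
	return 0;
}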
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index fb3d13f693dd..0cc47f12d995 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -107,11 +107,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
flags |= RADEON_IS_PCI;
}
- if (radeon_runtime_pm == 1)
- flags |= RADEON_IS_PX;
- else if ((radeon_runtime_pm == -1) &&
- radeon_has_atpx() &&
- ((flags & RADEON_IS_IGP) == 0))
+ if ((radeon_runtime_pm != 0) &&
+ radeon_has_atpx() &&
+ ((flags & RADEON_IS_IGP) == 0))
flags |= RADEON_IS_PX;
/* radeon_device_init should report only fatal error
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index ee738a524639..6fac8efe8340 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -603,7 +603,6 @@ static const struct attribute_group *hwmon_groups[] = {
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
- struct device *hwmon_dev;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
@@ -616,11 +615,11 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
- hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
- "radeon", rdev,
- hwmon_groups);
- if (IS_ERR(hwmon_dev)) {
- err = PTR_ERR(hwmon_dev);
+ rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
+ "radeon", rdev,
+ hwmon_groups);
+ if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+ err = PTR_ERR(rdev->pm.int_hwmon_dev);
dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err);
}
@@ -632,6 +631,12 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
return err;
}
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.int_hwmon_dev)
+ hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+}
+
static void radeon_dpm_thermal_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
@@ -1257,6 +1262,7 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RV670:
case CHIP_RS780:
case CHIP_RS880:
+ case CHIP_RV770:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
@@ -1273,7 +1279,6 @@ int radeon_pm_init(struct radeon_device *rdev)
else
rdev->pm.pm_method = PM_METHOD_PROFILE;
break;
- case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
@@ -1353,6 +1358,8 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
device_remove_file(rdev->dev, &dev_attr_power_method);
}
+ radeon_hwmon_fini(rdev);
+
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
}
@@ -1372,6 +1379,8 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
}
radeon_dpm_fini(rdev);
+ radeon_hwmon_fini(rdev);
+
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
}
@@ -1397,12 +1406,14 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
rdev->pm.active_crtcs = 0;
rdev->pm.active_crtc_count = 0;
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (radeon_crtc->enabled) {
- rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
- rdev->pm.active_crtc_count++;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.active_crtc_count++;
+ }
}
}
@@ -1469,12 +1480,14 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled) {
- rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
- rdev->pm.dpm.new_active_crtc_count++;
+ if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled) {
+ rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.dpm.new_active_crtc_count++;
+ }
}
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 36c717af6cf9..edb871d7d395 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
- list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+ drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
if (plane->crtc == crtc) {
tegra_plane_disable(plane);
plane->crtc = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 931490b9cfed..87df0b3674fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ SVGA3dCmdSurfaceDMASuffix *suffix;
+ uint32_t bo_size;
cmd = container_of(header, struct vmw_dma_cmd, header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ header->size - sizeof(*suffix));
+
+ /* Make sure device and verifier stay in sync. */
+ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+ DRM_ERROR("Invalid DMA suffix size.\n");
+ return -EINVAL;
+ }
+
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
+ /* Make sure DMA doesn't cross BO boundaries. */
+ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+ DRM_ERROR("Invalid DMA offset.\n");
+ return -EINVAL;
+ }
+
+ bo_size -= cmd->dma.guest.ptr.offset;
+ if (unlikely(suffix->maximumOffset > bo_size))
+ suffix->maximumOffset = bo_size;
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
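The added checks follow a generic clamp-to-buffer pattern: reject a start offset that already lies past the backing object, then cap the maximum offset to what remains. A minimal sketch of that pattern with hypothetical field names, not the driver's actual structures:

/* Standalone sketch, not driver code: the clamp-to-buffer pattern applied
 * above to the SurfaceDMA suffix, using hypothetical field names. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct dma_range {
	uint64_t offset;	/* start offset supplied by the guest */
	uint64_t max_offset;	/* highest offset the command may touch */
};

static int clamp_dma_range(struct dma_range *r, uint64_t bo_size)
{
	if (r->offset > bo_size)
		return -EINVAL;		/* starts beyond the buffer object */

	bo_size -= r->offset;
	if (r->max_offset > bo_size)
		r->max_offset = bo_size;	/* silently clamp, as the patch does */

	return 0;
}

int main(void)
{
	struct dma_range r = { .offset = 4096, .max_offset = 1 << 20 };

	printf("ret=%d max_offset=%llu\n", clamp_dma_range(&r, 64 * 4096),
	       (unsigned long long)r.max_offset);
	/* prints ret=0 max_offset=258048 (64 pages minus the 4 KiB offset) */
	return 0;
}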
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6d02e3b06375..d76f0b70c6e0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
if (cpu_has_tjmax(c))
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
- val = (eax >> 16) & 0x7f;
+ val = (eax >> 16) & 0xff;
/*
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val >= 85) {
+ if (val) {
dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
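With the widened mask and the relaxed plausibility check, any non-zero 8-bit TjMax value read from the register is now trusted, instead of only values between 85 and 127 degrees C. A minimal sketch of the new extraction with a hypothetical register value; the MSR read itself is outside this hunk:

/* Standalone sketch, not driver code: TjMax extraction after this change.
 * The eax value is hypothetical; in the driver it comes from an MSR read. */
#include <stdint.h>
#include <stdio.h>

static int tjmax_millideg(uint32_t eax)
{
	int val = (eax >> 16) & 0xff;	/* was & 0x7f before this patch */

	return val ? val * 1000 : 0;	/* 0: caller falls back to a default */
}

int main(void)
{
	/* 80 degrees C: rejected by the old "val >= 85" check, accepted now */
	printf("TjMax = %d millidegrees C\n", tjmax_millideg(0x00500000));
	return 0;
}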
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index c104cc32989d..c9cddf5f056b 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -1,4 +1,4 @@
-/*
+ /*
* Driver for Linear Technology LTC2945 I2C Power Monitor
*
* Copyright (c) 2014 Guenter Roeck
@@ -314,8 +314,8 @@ static ssize_t ltc2945_reset_history(struct device *dev,
reg = LTC2945_MAX_ADIN_H;
break;
default:
- BUG();
- break;
+ WARN_ONCE(1, "Bad register: 0x%x\n", reg);
+ return -EINVAL;
}
/* Reset maximum */
ret = regmap_bulk_write(regmap, reg, buf_max, num_regs);
diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c
index d867e6bb2be1..8242b75d96c8 100644
--- a/drivers/hwmon/vexpress.c
+++ b/drivers/hwmon/vexpress.c
@@ -27,15 +27,15 @@
struct vexpress_hwmon_data {
struct device *hwmon_dev;
struct vexpress_config_func *func;
+ const char *name;
};
static ssize_t vexpress_hwmon_name_show(struct device *dev,
struct device_attribute *dev_attr, char *buffer)
{
- const char *compatible = of_get_property(dev->of_node, "compatible",
- NULL);
+ struct vexpress_hwmon_data *data = dev_get_drvdata(dev);
- return sprintf(buffer, "%s\n", compatible);
+ return sprintf(buffer, "%s\n", data->name);
}
static ssize_t vexpress_hwmon_label_show(struct device *dev,
@@ -43,9 +43,6 @@ static ssize_t vexpress_hwmon_label_show(struct device *dev,
{
const char *label = of_get_property(dev->of_node, "label", NULL);
- if (!label)
- return -ENOENT;
-
return snprintf(buffer, PAGE_SIZE, "%s\n", label);
}
@@ -84,6 +81,20 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev,
to_sensor_dev_attr(dev_attr)->index));
}
+static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
+
+ if (dev_attr->show == vexpress_hwmon_label_show &&
+ !of_get_property(dev->of_node, "label", NULL))
+ return 0;
+
+ return attr->mode;
+}
+
static DEVICE_ATTR(name, S_IRUGO, vexpress_hwmon_name_show, NULL);
#define VEXPRESS_HWMON_ATTRS(_name, _label_attr, _input_attr) \
@@ -94,14 +105,27 @@ struct attribute *vexpress_hwmon_attrs_##_name[] = { \
NULL \
}
+struct vexpress_hwmon_type {
+ const char *name;
+ const struct attribute_group **attr_groups;
+};
+
#if !defined(CONFIG_REGULATOR_VEXPRESS)
static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1000);
static VEXPRESS_HWMON_ATTRS(volt, in1_label, in1_input);
static struct attribute_group vexpress_hwmon_group_volt = {
+ .is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_volt,
};
+static struct vexpress_hwmon_type vexpress_hwmon_volt = {
+ .name = "vexpress_volt",
+ .attr_groups = (const struct attribute_group *[]) {
+ &vexpress_hwmon_group_volt,
+ NULL,
+ },
+};
#endif
static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
@@ -109,52 +133,84 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1000);
static VEXPRESS_HWMON_ATTRS(amp, curr1_label, curr1_input);
static struct attribute_group vexpress_hwmon_group_amp = {
+ .is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_amp,
};
+static struct vexpress_hwmon_type vexpress_hwmon_amp = {
+ .name = "vexpress_amp",
+ .attr_groups = (const struct attribute_group *[]) {
+ &vexpress_hwmon_group_amp,
+ NULL
+ },
+};
static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1000);
static VEXPRESS_HWMON_ATTRS(temp, temp1_label, temp1_input);
static struct attribute_group vexpress_hwmon_group_temp = {
+ .is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_temp,
};
+static struct vexpress_hwmon_type vexpress_hwmon_temp = {
+ .name = "vexpress_temp",
+ .attr_groups = (const struct attribute_group *[]) {
+ &vexpress_hwmon_group_temp,
+ NULL
+ },
+};
static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show,
NULL, 1);
static VEXPRESS_HWMON_ATTRS(power, power1_label, power1_input);
static struct attribute_group vexpress_hwmon_group_power = {
+ .is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_power,
};
+static struct vexpress_hwmon_type vexpress_hwmon_power = {
+ .name = "vexpress_power",
+ .attr_groups = (const struct attribute_group *[]) {
+ &vexpress_hwmon_group_power,
+ NULL
+ },
+};
static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL);
static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show,
NULL, 1);
static VEXPRESS_HWMON_ATTRS(energy, energy1_label, energy1_input);
static struct attribute_group vexpress_hwmon_group_energy = {
+ .is_visible = vexpress_hwmon_attr_is_visible,
.attrs = vexpress_hwmon_attrs_energy,
};
+static struct vexpress_hwmon_type vexpress_hwmon_energy = {
+ .name = "vexpress_energy",
+ .attr_groups = (const struct attribute_group *[]) {
+ &vexpress_hwmon_group_energy,
+ NULL
+ },
+};
static struct of_device_id vexpress_hwmon_of_match[] = {
#if !defined(CONFIG_REGULATOR_VEXPRESS)
{
.compatible = "arm,vexpress-volt",
- .data = &vexpress_hwmon_group_volt,
+ .data = &vexpress_hwmon_volt,
},
#endif
{
.compatible = "arm,vexpress-amp",
- .data = &vexpress_hwmon_group_amp,
+ .data = &vexpress_hwmon_amp,
}, {
.compatible = "arm,vexpress-temp",
- .data = &vexpress_hwmon_group_temp,
+ .data = &vexpress_hwmon_temp,
}, {
.compatible = "arm,vexpress-power",
- .data = &vexpress_hwmon_group_power,
+ .data = &vexpress_hwmon_power,
}, {
.compatible = "arm,vexpress-energy",
- .data = &vexpress_hwmon_group_energy,
+ .data = &vexpress_hwmon_energy,
},
{}
};
@@ -165,6 +221,7 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
int err;
const struct of_device_id *match;
struct vexpress_hwmon_data *data;
+ const struct vexpress_hwmon_type *type;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -174,12 +231,14 @@ static int vexpress_hwmon_probe(struct platform_device *pdev)
match = of_match_device(vexpress_hwmon_of_match, &pdev->dev);
if (!match)
return -ENODEV;
+ type = match->data;
+ data->name = type->name;
data->func = vexpress_config_func_get_by_dev(&pdev->dev);
if (!data->func)
return -ENODEV;
- err = sysfs_create_group(&pdev->dev.kobj, match->data);
+ err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups);
if (err)
goto error;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index a43220c2e3d9..4d140bbbe100 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -750,9 +750,10 @@ void intel_idle_state_table_update(void)
if (package_num + 1 > num_sockets) {
num_sockets = package_num + 1;
- if (num_sockets > 4)
+ if (num_sockets > 4) {
cpuidle_state_table = ivt_cstates_8s;
return;
+ }
}
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index d86196cfe4b4..24c28e3f93a3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -106,7 +106,7 @@ config AT91_ADC
Say yes here to build support for Atmel AT91 ADC.
config EXYNOS_ADC
- bool "Exynos ADC driver support"
+ tristate "Exynos ADC driver support"
depends on OF
help
Core support for the ADC block found in the Samsung EXYNOS series
@@ -114,7 +114,7 @@ config EXYNOS_ADC
this resource.
config LP8788_ADC
- bool "LP8788 ADC driver"
+ tristate "LP8788 ADC driver"
depends on MFD_LP8788
help
Say yes here to build support for TI LP8788 ADC.
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 5b1aa027c034..89777ed9abd8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -765,14 +765,17 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st,
if (!pdata)
return -EINVAL;
+ st->caps = (struct at91_adc_caps *)
+ platform_get_device_id(pdev)->driver_data;
+
st->use_external = pdata->use_external_triggers;
st->vref_mv = pdata->vref;
st->channels_mask = pdata->channels_used;
- st->num_channels = pdata->num_channels;
+ st->num_channels = st->caps->num_channels;
st->startup_time = pdata->startup_time;
st->trigger_number = pdata->trigger_number;
st->trigger_list = pdata->trigger_list;
- st->registers = pdata->registers;
+ st->registers = &st->caps->registers;
return 0;
}
@@ -1004,8 +1007,11 @@ static int at91_adc_probe(struct platform_device *pdev)
* the best converted final value between two channels selection
* The formula thus is : Sample and Hold Time = (shtim + 1) / ADCClock
*/
- shtim = round_up((st->sample_hold_time * adc_clk_khz /
- 1000) - 1, 1);
+ if (st->sample_hold_time > 0)
+ shtim = round_up((st->sample_hold_time * adc_clk_khz / 1000)
+ - 1, 1);
+ else
+ shtim = 0;
reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
@@ -1101,7 +1107,6 @@ static int at91_adc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
static struct at91_adc_caps at91sam9260_caps = {
.calc_startup_ticks = calc_startup_ticks_9260,
.num_channels = 4,
@@ -1154,11 +1159,27 @@ static const struct of_device_id at91_adc_dt_ids[] = {
{},
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
-#endif
+
+static const struct platform_device_id at91_adc_ids[] = {
+ {
+ .name = "at91sam9260-adc",
+ .driver_data = (unsigned long)&at91sam9260_caps,
+ }, {
+ .name = "at91sam9g45-adc",
+ .driver_data = (unsigned long)&at91sam9g45_caps,
+ }, {
+ .name = "at91sam9x5-adc",
+ .driver_data = (unsigned long)&at91sam9x5_caps,
+ }, {
+ /* terminator */
+ }
+};
+MODULE_DEVICE_TABLE(platform, at91_adc_ids);
static struct platform_driver at91_adc_driver = {
.probe = at91_adc_probe,
.remove = at91_adc_remove,
+ .id_table = at91_adc_ids,
.driver = {
.name = DRIVER_NAME,
.of_match_table = of_match_ptr(at91_adc_dt_ids),
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index d25b262193a7..affa93f51789 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -344,7 +344,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
exynos_adc_hw_init(info);
- ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev);
+ ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed adding child nodes\n");
goto err_of_populate;
@@ -353,7 +353,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
return 0;
err_of_populate:
- device_for_each_child(&pdev->dev, NULL,
+ device_for_each_child(&indio_dev->dev, NULL,
exynos_adc_remove_devices);
regulator_disable(info->vdd);
clk_disable_unprepare(info->clk);
@@ -369,7 +369,7 @@ static int exynos_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
- device_for_each_child(&pdev->dev, NULL,
+ device_for_each_child(&indio_dev->dev, NULL,
exynos_adc_remove_devices);
regulator_disable(info->vdd);
clk_disable_unprepare(info->clk);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index cb9f96b446a5..d8ad606c7cd0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client,
{
struct inv_mpu6050_state *st;
struct iio_dev *indio_dev;
+ struct inv_mpu6050_platform_data *pdata;
int result;
if (!i2c_check_functionality(client->adapter,
@@ -672,8 +673,10 @@ static int inv_mpu_probe(struct i2c_client *client,
st = iio_priv(indio_dev);
st->client = client;
- st->plat_data = *(struct inv_mpu6050_platform_data
- *)dev_get_platdata(&client->dev);
+ pdata = (struct inv_mpu6050_platform_data
+ *)dev_get_platdata(&client->dev);
+ if (pdata)
+ st->plat_data = *pdata;
/* power is turned on inside check chip type*/
result = inv_check_and_setup_chip(st, id);
if (result)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e108f2a9d827..e472cff6eeae 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -165,7 +165,8 @@ static ssize_t iio_scan_el_show(struct device *dev,
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- ret = test_bit(to_iio_dev_attr(attr)->address,
+ /* Ensure ret is 0 or 1. */
+ ret = !!test_bit(to_iio_dev_attr(attr)->address,
indio_dev->buffer->scan_mask);
return sprintf(buf, "%d\n", ret);
@@ -862,7 +863,8 @@ int iio_scan_mask_query(struct iio_dev *indio_dev,
if (!buffer->scan_mask)
return 0;
- return test_bit(bit, buffer->scan_mask);
+ /* Ensure return value is 0 or 1. */
+ return !!test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 47a6dbac2d0c..d976e6ce60db 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -221,6 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
*val = cm32181->calibscale;
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
ret = cm32181_read_als_it(cm32181, val2);
return ret;
}
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index a45e07492db3..39fc67e82138 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -652,7 +652,19 @@ static int cm36651_probe(struct i2c_client *client,
cm36651->client = client;
cm36651->ps_client = i2c_new_dummy(client->adapter,
CM36651_I2C_ADDR_PS);
+ if (!cm36651->ps_client) {
+ dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+ ret = -ENODEV;
+ goto error_disable_reg;
+ }
+
cm36651->ara_client = i2c_new_dummy(client->adapter, CM36651_ARA);
+ if (!cm36651->ara_client) {
+ dev_err(&client->dev, "%s: new i2c device failed\n", __func__);
+ ret = -ENODEV;
+ goto error_i2c_unregister_ps;
+ }
+
mutex_init(&cm36651->lock);
indio_dev->dev.parent = &client->dev;
indio_dev->channels = cm36651_channels;
@@ -664,7 +676,7 @@ static int cm36651_probe(struct i2c_client *client,
ret = cm36651_setup_reg(cm36651);
if (ret) {
dev_err(&client->dev, "%s: register setup failed\n", __func__);
- goto error_disable_reg;
+ goto error_i2c_unregister_ara;
}
ret = request_threaded_irq(client->irq, NULL, cm36651_irq_handler,
@@ -672,7 +684,7 @@ static int cm36651_probe(struct i2c_client *client,
"cm36651", indio_dev);
if (ret) {
dev_err(&client->dev, "%s: request irq failed\n", __func__);
- goto error_disable_reg;
+ goto error_i2c_unregister_ara;
}
ret = iio_device_register(indio_dev);
@@ -685,6 +697,10 @@ static int cm36651_probe(struct i2c_client *client,
error_free_irq:
free_irq(client->irq, indio_dev);
+error_i2c_unregister_ara:
+ i2c_unregister_device(cm36651->ara_client);
+error_i2c_unregister_ps:
+ i2c_unregister_device(cm36651->ps_client);
error_disable_reg:
regulator_disable(cm36651->vled_reg);
return ret;
@@ -698,6 +714,8 @@ static int cm36651_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
regulator_disable(cm36651->vled_reg);
free_irq(client->irq, indio_dev);
+ i2c_unregister_device(cm36651->ps_client);
+ i2c_unregister_device(cm36651->ara_client);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index d4e8983fba53..23f38cf2c5cd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,10 +1,10 @@
config INFINIBAND_CXGB4
- tristate "Chelsio T4 RDMA Driver"
+ tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
select GENERIC_ALLOCATOR
---help---
- This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
- 10GbE adapters.
+ This is an iWARP/RDMA driver for the Chelsio T4 and T5
+ 1GbE, 10GbE adapters and T5 40GbE adapter.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 185452abf32c..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN(1);
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ }
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- state_set(&ep->com, ABORTING);
+ __state_set(&ep->com, ABORTING);
set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
}
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
return credits;
}
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
enum c4iw_qp_attr_mask mask;
int err;
+ int disconnect = 0;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* will abort the connection.
*/
if (stop_ep_timer(ep))
- return;
+ return 0;
/*
* If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* if we don't even have the mpa message, then bail.
*/
if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
+ return 0;
mpa = (struct mpa_message *) ep->mpa_pkt;
/* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* We'll continue process when more data arrives.
*/
if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
+ return 0;
if (mpa->flags & MPA_REJECT) {
err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_NOMATCH_RTR;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ attrs.send_term = 1;
err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
err = -ENOMEM;
+ disconnect = 1;
goto out;
}
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_INSUFF_IRD;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ attrs.send_term = 1;
err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
err = -ENOMEM;
+ disconnect = 1;
goto out;
}
goto out;
@@ -1366,7 +1375,7 @@ err:
send_abort(ep, skb, GFP_KERNEL);
out:
connect_reply_upcall(ep, err);
- return;
+ return disconnect;
}
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int tid = GET_TID(hdr);
struct tid_info *t = dev->rdev.lldi.tids;
__u8 status = hdr->status;
+ int disconnect = 0;
ep = lookup_tid(t, tid);
if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
switch (ep->com.state) {
case MPA_REQ_SENT:
ep->rcv_seq += dlen;
- process_mpa_reply(ep, skb);
+ disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
ep->com.state, ep->hwtid, status);
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ disconnect = 1;
break;
}
default:
break;
}
mutex_unlock(&ep->com.mutex);
+ if (disconnect)
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
return 0;
}
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN(1);
}
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ }
rpl = cplhdr(skb);
INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
__func__, ep, ep->hwtid, ep->com.state);
abort = 0;
}
- mutex_unlock(&ep->com.mutex);
if (abort)
abort_connection(ep, NULL, GFP_KERNEL);
+ mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7b8c5806a09d..7474b490760a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
u8 ecode;
u16 sq_db_inc;
u16 rq_db_inc;
+ u8 send_term;
};
struct c4iw_qp {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5114cb486f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
- disconnect = 1;
- c4iw_get_ep(&qhp->ep->com);
- if (!internal)
+ if (!internal) {
+ c4iw_get_ep(&qhp->ep->com);
terminate = 1;
- else {
+ disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
/*
* Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
* ringing the queue db when we're in DB_FULL mode.
+ * Only allow this on T4 devices.
*/
attrs.sq_db_inc = attr->sq_psn;
attrs.rq_db_inc = attr->rq_psn;
mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+ if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+ (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+ return -EINVAL;
return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index dc193c292671..6121ca08fe58 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -836,4 +836,18 @@ struct ulptx_idata {
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL 14
+#define M_CONG_CNTRL 0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID (1 << 31)
+
#endif /* _T4FW_RI_API_H_ */
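The S_/M_/V_/G_ quartet added here follows the usual Chelsio field-macro convention: V_ packs a value at the field's shift, G_ masks and extracts it again. A small standalone check mirroring the opt2 usage in send_connect() and accept_cr():

/* Standalone sketch, not driver code: packing and unpacking the 2-bit
 * congestion-control field with the macros defined above. */
#include <assert.h>
#include <stdint.h>

#define S_CONG_CNTRL	14
#define M_CONG_CNTRL	0x3
#define V_CONG_CNTRL(x)	((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x)	(((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)

enum { CONG_ALG_RENO, CONG_ALG_TAHOE, CONG_ALG_NEWRENO, CONG_ALG_HIGHSPEED };

int main(void)
{
	uint32_t opt2 = 0;

	opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);	/* as done for T5 connections */
	assert(G_CONG_CNTRL(opt2) == CONG_ALG_TAHOE);
	return 0;
}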
diff --git a/drivers/input/misc/da9055_onkey.c b/drivers/input/misc/da9055_onkey.c
index 4b11ede34950..4765799fef74 100644
--- a/drivers/input/misc/da9055_onkey.c
+++ b/drivers/input/misc/da9055_onkey.c
@@ -109,7 +109,6 @@ static int da9055_onkey_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work);
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
err = request_threaded_irq(irq, NULL, da9055_onkey_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"ONKEY", onkey);
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 08ead2aaede5..20c80f543d5e 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -169,6 +169,7 @@ static int soc_button_pnp_probe(struct pnp_dev *pdev,
soc_button_remove(pdev);
return error;
}
+ continue;
}
priv->children[i] = pd;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ef1cf52f8bb9..088d3541c7d3 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1353,6 +1353,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 6:
case 7:
case 8:
+ case 9:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d8d49d10f9bb..ef9f4913450d 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,44 @@ void synaptics_reset(struct psmouse *psmouse)
}
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+/* This list has been kindly provided by Synaptics. */
+static const char * const topbuttonpad_pnp_ids[] = {
+ "LEN0017",
+ "LEN0018",
+ "LEN0019",
+ "LEN0023",
+ "LEN002A",
+ "LEN002B",
+ "LEN002C",
+ "LEN002D",
+ "LEN002E",
+ "LEN0033", /* Helix */
+ "LEN0034", /* T431s, T540, X1 Carbon 2nd */
+ "LEN0035", /* X240 */
+ "LEN0036", /* T440 */
+ "LEN0037",
+ "LEN0038",
+ "LEN0041",
+ "LEN0042", /* Yoga */
+ "LEN0045",
+ "LEN0046",
+ "LEN0047",
+ "LEN0048",
+ "LEN0049",
+ "LEN2000",
+ "LEN2001",
+ "LEN2002",
+ "LEN2003",
+ "LEN2004", /* L440 */
+ "LEN2005",
+ "LEN2006",
+ "LEN2007",
+ "LEN2008",
+ "LEN2009",
+ "LEN200A",
+ "LEN200B",
+ NULL
+};
/*****************************************************************************
* Synaptics communications functions
@@ -1255,8 +1293,10 @@ static void set_abs_position_params(struct input_dev *dev,
input_abs_set_res(dev, y_code, priv->y_res);
}
-static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
+static void set_input_params(struct psmouse *psmouse,
+ struct synaptics_data *priv)
{
+ struct input_dev *dev = psmouse->dev;
int i;
/* Things that apply to both modes */
@@ -1325,6 +1365,17 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+ /* See if this buttonpad has a top button area */
+ if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
+ for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
+ if (strstr(psmouse->ps2dev.serio->firmware_id,
+ topbuttonpad_pnp_ids[i])) {
+ __set_bit(INPUT_PROP_TOPBUTTONPAD,
+ dev->propbit);
+ break;
+ }
+ }
+ }
/* Clickpads report only left button */
__clear_bit(BTN_RIGHT, dev->keybit);
__clear_bit(BTN_MIDDLE, dev->keybit);
@@ -1515,6 +1566,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
+ /* Lenovo ThinkPad T431s */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
/* Lenovo ThinkPad T440s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1523,6 +1582,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
+ /* Lenovo ThinkPad L440 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
/* Lenovo ThinkPad T540p */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -1530,6 +1597,32 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
},
.driver_data = (int []){1024, 5056, 2058, 4832},
},
+ {
+ /* Lenovo ThinkPad L540 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
+ {
+ /* Lenovo Yoga S1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "ThinkPad S1 Yoga"),
+ },
+ .driver_data = (int []){1232, 5710, 1156, 4696},
+ },
+ {
+ /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION,
+ "ThinkPad X1 Carbon 2nd"),
+ },
+ .driver_data = (int []){1024, 5112, 2024, 4832},
+ },
#endif
{ }
};
@@ -1593,7 +1686,7 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
priv->board_id, priv->firmware_id);
- set_input_params(psmouse->dev, priv);
+ set_input_params(psmouse, priv);
/*
* Encode touchpad model so that it can be used to set
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0ec9abbe31fe..381b20d4c561 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -702,6 +702,17 @@ static int i8042_pnp_aux_irq;
static char i8042_pnp_kbd_name[32];
static char i8042_pnp_aux_name[32];
+static void i8042_pnp_id_to_string(struct pnp_id *id, char *dst, int dst_size)
+{
+ strlcpy(dst, "PNP:", dst_size);
+
+ while (id) {
+ strlcat(dst, " ", dst_size);
+ strlcat(dst, id->id, dst_size);
+ id = id->next;
+ }
+}
+
static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
{
if (pnp_port_valid(dev, 0) && pnp_port_len(dev, 0) == 1)
@@ -718,6 +729,8 @@ static int i8042_pnp_kbd_probe(struct pnp_dev *dev, const struct pnp_device_id *
strlcat(i8042_pnp_kbd_name, ":", sizeof(i8042_pnp_kbd_name));
strlcat(i8042_pnp_kbd_name, pnp_dev_name(dev), sizeof(i8042_pnp_kbd_name));
}
+ i8042_pnp_id_to_string(dev->id, i8042_kbd_firmware_id,
+ sizeof(i8042_kbd_firmware_id));
/* Keyboard ports are always supposed to be wakeup-enabled */
device_set_wakeup_enable(&dev->dev, true);
@@ -742,6 +755,8 @@ static int i8042_pnp_aux_probe(struct pnp_dev *dev, const struct pnp_device_id *
strlcat(i8042_pnp_aux_name, ":", sizeof(i8042_pnp_aux_name));
strlcat(i8042_pnp_aux_name, pnp_dev_name(dev), sizeof(i8042_pnp_aux_name));
}
+ i8042_pnp_id_to_string(dev->id, i8042_aux_firmware_id,
+ sizeof(i8042_aux_firmware_id));
i8042_pnp_aux_devices++;
return 0;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 020053fa5aaa..3807c3e971cc 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -87,6 +87,8 @@ MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
#endif
static bool i8042_bypass_aux_irq_test;
+static char i8042_kbd_firmware_id[128];
+static char i8042_aux_firmware_id[128];
#include "i8042.h"
@@ -1218,6 +1220,8 @@ static int __init i8042_create_kbd_port(void)
serio->dev.parent = &i8042_platform_device->dev;
strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
strlcpy(serio->phys, I8042_KBD_PHYS_DESC, sizeof(serio->phys));
+ strlcpy(serio->firmware_id, i8042_kbd_firmware_id,
+ sizeof(serio->firmware_id));
port->serio = serio;
port->irq = I8042_KBD_IRQ;
@@ -1244,6 +1248,8 @@ static int __init i8042_create_aux_port(int idx)
if (idx < 0) {
strlcpy(serio->name, "i8042 AUX port", sizeof(serio->name));
strlcpy(serio->phys, I8042_AUX_PHYS_DESC, sizeof(serio->phys));
+ strlcpy(serio->firmware_id, i8042_aux_firmware_id,
+ sizeof(serio->firmware_id));
serio->close = i8042_port_close;
} else {
snprintf(serio->name, sizeof(serio->name), "i8042 AUX%d port", idx);
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f4c4ab04bc2..b29134de983b 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -451,6 +451,13 @@ static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *
return retval;
}
+static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct serio *serio = to_serio_port(dev);
+
+ return sprintf(buf, "%s\n", serio->firmware_id);
+}
+
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
@@ -473,12 +480,14 @@ static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+static DEVICE_ATTR_RO(firmware_id);
static struct attribute *serio_device_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_description.attr,
&dev_attr_drvctl.attr,
&dev_attr_bind_mode.attr,
+ &dev_attr_firmware_id.attr,
NULL
};
@@ -921,9 +930,14 @@ static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);
+
SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
+ if (serio->firmware_id[0])
+ SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
+ serio->firmware_id);
+
return 0;
}
#undef SERIO_ADD_UEVENT_VAR
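With the serio changes above, the firmware id is exposed both as a sysfs attribute and as a SERIO_FIRMWARE_ID uevent variable. As a usage illustration only, a trivial reader of the new attribute (the serio1 path and the sample id in the comment are hypothetical; the actual device number varies per system):

#include <stdio.h>

int main(void)
{
        /* Example path; the actual serioN number depends on the system. */
        const char *path = "/sys/bus/serio/devices/serio1/firmware_id";
        char id[128];
        FILE *f = fopen(path, "r");

        if (!f || !fgets(id, sizeof(id), f)) {
                perror(path);
                if (f)
                        fclose(f);
                return 1;
        }
        printf("touchpad firmware id: %s", id);   /* e.g. "PNP: SYN1d22 PNP0f13" */
        fclose(f);
        return 0;
}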
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index b16ebef5b911..611fc3905d00 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -22,23 +22,18 @@
#define HID_USAGE_PAGE_DIGITIZER 0x0d
#define HID_USAGE_PAGE_DESKTOP 0x01
#define HID_USAGE 0x09
-#define HID_USAGE_X 0x30
-#define HID_USAGE_Y 0x31
-#define HID_USAGE_X_TILT 0x3d
-#define HID_USAGE_Y_TILT 0x3e
-#define HID_USAGE_FINGER 0x22
-#define HID_USAGE_STYLUS 0x20
-#define HID_USAGE_CONTACTMAX 0x55
+#define HID_USAGE_X ((HID_USAGE_PAGE_DESKTOP << 16) | 0x30)
+#define HID_USAGE_Y ((HID_USAGE_PAGE_DESKTOP << 16) | 0x31)
+#define HID_USAGE_PRESSURE ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x30)
+#define HID_USAGE_X_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3d)
+#define HID_USAGE_Y_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3e)
+#define HID_USAGE_FINGER ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x22)
+#define HID_USAGE_STYLUS ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x20)
+#define HID_USAGE_CONTACTMAX ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x55)
#define HID_COLLECTION 0xa1
#define HID_COLLECTION_LOGICAL 0x02
#define HID_COLLECTION_END 0xc0
-enum {
- WCM_UNDEFINED = 0,
- WCM_DESKTOP,
- WCM_DIGITIZER,
-};
-
struct hid_descriptor {
struct usb_descriptor_header header;
__le16 bcdHID;
@@ -305,7 +300,7 @@ static int wacom_parse_hid(struct usb_interface *intf,
char limit = 0;
/* result has to be defined as int for some devices */
int result = 0, touch_max = 0;
- int i = 0, usage = WCM_UNDEFINED, finger = 0, pen = 0;
+ int i = 0, page = 0, finger = 0, pen = 0;
unsigned char *report;
report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
@@ -332,134 +327,121 @@ static int wacom_parse_hid(struct usb_interface *intf,
switch (report[i]) {
case HID_USAGE_PAGE:
- switch (report[i + 1]) {
- case HID_USAGE_PAGE_DIGITIZER:
- usage = WCM_DIGITIZER;
- i++;
- break;
-
- case HID_USAGE_PAGE_DESKTOP:
- usage = WCM_DESKTOP;
- i++;
- break;
- }
+ page = report[i + 1];
+ i++;
break;
case HID_USAGE:
- switch (report[i + 1]) {
+ switch (page << 16 | report[i + 1]) {
case HID_USAGE_X:
- if (usage == WCM_DESKTOP) {
- if (finger) {
- features->device_type = BTN_TOOL_FINGER;
- /* touch device at least supports one touch point */
- touch_max = 1;
- switch (features->type) {
- case TABLETPC2FG:
- features->pktlen = WACOM_PKGLEN_TPC2FG;
- break;
-
- case MTSCREEN:
- case WACOM_24HDT:
- features->pktlen = WACOM_PKGLEN_MTOUCH;
- break;
-
- case MTTPC:
- features->pktlen = WACOM_PKGLEN_MTTPC;
- break;
-
- case BAMBOO_PT:
- features->pktlen = WACOM_PKGLEN_BBTOUCH;
- break;
-
- default:
- features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- break;
- }
-
- switch (features->type) {
- case BAMBOO_PT:
- features->x_phy =
- get_unaligned_le16(&report[i + 5]);
- features->x_max =
- get_unaligned_le16(&report[i + 8]);
- i += 15;
- break;
-
- case WACOM_24HDT:
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- features->x_phy =
- get_unaligned_le16(&report[i + 8]);
- features->unit = report[i - 1];
- features->unitExpo = report[i - 3];
- i += 12;
- break;
-
- default:
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- features->x_phy =
- get_unaligned_le16(&report[i + 6]);
- features->unit = report[i + 9];
- features->unitExpo = report[i + 11];
- i += 12;
- break;
- }
- } else if (pen) {
- /* penabled only accepts exact bytes of data */
- if (features->type >= TABLETPC)
- features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- features->device_type = BTN_TOOL_PEN;
+ if (finger) {
+ features->device_type = BTN_TOOL_FINGER;
+ /* touch device at least supports one touch point */
+ touch_max = 1;
+ switch (features->type) {
+ case TABLETPC2FG:
+ features->pktlen = WACOM_PKGLEN_TPC2FG;
+ break;
+
+ case MTSCREEN:
+ case WACOM_24HDT:
+ features->pktlen = WACOM_PKGLEN_MTOUCH;
+ break;
+
+ case MTTPC:
+ features->pktlen = WACOM_PKGLEN_MTTPC;
+ break;
+
+ case BAMBOO_PT:
+ features->pktlen = WACOM_PKGLEN_BBTOUCH;
+ break;
+
+ default:
+ features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ break;
+ }
+
+ switch (features->type) {
+ case BAMBOO_PT:
+ features->x_phy =
+ get_unaligned_le16(&report[i + 5]);
+ features->x_max =
+ get_unaligned_le16(&report[i + 8]);
+ i += 15;
+ break;
+
+ case WACOM_24HDT:
features->x_max =
get_unaligned_le16(&report[i + 3]);
- i += 4;
+ features->x_phy =
+ get_unaligned_le16(&report[i + 8]);
+ features->unit = report[i - 1];
+ features->unitExpo = report[i - 3];
+ i += 12;
+ break;
+
+ default:
+ features->x_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->x_phy =
+ get_unaligned_le16(&report[i + 6]);
+ features->unit = report[i + 9];
+ features->unitExpo = report[i + 11];
+ i += 12;
+ break;
}
+ } else if (pen) {
+ /* penabled only accepts exact bytes of data */
+ if (features->type >= TABLETPC)
+ features->pktlen = WACOM_PKGLEN_GRAPHIRE;
+ features->device_type = BTN_TOOL_PEN;
+ features->x_max =
+ get_unaligned_le16(&report[i + 3]);
+ i += 4;
}
break;
case HID_USAGE_Y:
- if (usage == WCM_DESKTOP) {
- if (finger) {
- switch (features->type) {
- case TABLETPC2FG:
- case MTSCREEN:
- case MTTPC:
- features->y_max =
- get_unaligned_le16(&report[i + 3]);
- features->y_phy =
- get_unaligned_le16(&report[i + 6]);
- i += 7;
- break;
-
- case WACOM_24HDT:
- features->y_max =
- get_unaligned_le16(&report[i + 3]);
- features->y_phy =
- get_unaligned_le16(&report[i - 2]);
- i += 7;
- break;
-
- case BAMBOO_PT:
- features->y_phy =
- get_unaligned_le16(&report[i + 3]);
- features->y_max =
- get_unaligned_le16(&report[i + 6]);
- i += 12;
- break;
-
- default:
- features->y_max =
- features->x_max;
- features->y_phy =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- break;
- }
- } else if (pen) {
+ if (finger) {
+ switch (features->type) {
+ case TABLETPC2FG:
+ case MTSCREEN:
+ case MTTPC:
+ features->y_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->y_phy =
+ get_unaligned_le16(&report[i + 6]);
+ i += 7;
+ break;
+
+ case WACOM_24HDT:
+ features->y_max =
+ get_unaligned_le16(&report[i + 3]);
+ features->y_phy =
+ get_unaligned_le16(&report[i - 2]);
+ i += 7;
+ break;
+
+ case BAMBOO_PT:
+ features->y_phy =
+ get_unaligned_le16(&report[i + 3]);
+ features->y_max =
+ get_unaligned_le16(&report[i + 6]);
+ i += 12;
+ break;
+
+ default:
features->y_max =
+ features->x_max;
+ features->y_phy =
get_unaligned_le16(&report[i + 3]);
i += 4;
+ break;
}
+ } else if (pen) {
+ features->y_max =
+ get_unaligned_le16(&report[i + 3]);
+ i += 4;
}
break;
@@ -484,12 +466,20 @@ static int wacom_parse_hid(struct usb_interface *intf,
wacom_retrieve_report_data(intf, features);
i++;
break;
+
+ case HID_USAGE_PRESSURE:
+ if (pen) {
+ features->pressure_max =
+ get_unaligned_le16(&report[i + 3]);
+ i += 4;
+ }
+ break;
}
break;
case HID_COLLECTION_END:
/* reset UsagePage and Finger */
- finger = usage = 0;
+ finger = page = 0;
break;
case HID_COLLECTION:
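The wacom_sys.c rework replaces the WCM_* usage-page state machine with a single switch keyed on (page << 16 | usage), which is what lets Desktop X/Y be told apart from Digitizer pressure even though they share the low usage byte 0x30. A compact sketch of that encoding, with simplified names:

#include <stdio.h>
#include <stdint.h>

#define PAGE_DESKTOP    0x01
#define PAGE_DIGITIZER  0x0d

/* Combine the current usage page and the usage byte into one switch key. */
#define USAGE(page, u)  (((uint32_t)(page) << 16) | (u))

#define USAGE_X         USAGE(PAGE_DESKTOP, 0x30)
#define USAGE_PRESSURE  USAGE(PAGE_DIGITIZER, 0x30)   /* same low byte, other page */

static const char *describe(uint32_t page, uint32_t usage_byte)
{
        switch (USAGE(page, usage_byte)) {
        case USAGE_X:        return "desktop X";
        case USAGE_PRESSURE: return "digitizer pressure";
        default:             return "unhandled";
        }
}

int main(void)
{
        printf("%s\n", describe(PAGE_DESKTOP, 0x30));    /* desktop X */
        printf("%s\n", describe(PAGE_DIGITIZER, 0x30));  /* digitizer pressure */
        return 0;
}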
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 05f371df6c40..4822c57a3756 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -178,10 +178,9 @@ static int wacom_ptu_irq(struct wacom_wac *wacom)
static int wacom_dtu_irq(struct wacom_wac *wacom)
{
- struct wacom_features *features = &wacom->features;
- char *data = wacom->data;
+ unsigned char *data = wacom->data;
struct input_dev *input = wacom->input;
- int prox = data[1] & 0x20, pressure;
+ int prox = data[1] & 0x20;
dev_dbg(input->dev.parent,
"%s: received report #%d", __func__, data[0]);
@@ -198,10 +197,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
- pressure = ((data[7] & 0x01) << 8) | data[6];
- if (pressure < 0)
- pressure = features->pressure_max + pressure + 1;
- input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x01) << 8) | data[6]);
input_report_key(input, BTN_TOUCH, data[1] & 0x05);
if (!prox) /* out-prox */
wacom->id[0] = 0;
@@ -906,7 +902,7 @@ static int int_dist(int x1, int y1, int x2, int y2)
static int wacom_24hdt_irq(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
- char *data = wacom->data;
+ unsigned char *data = wacom->data;
int i;
int current_num_contacts = data[61];
int contacts_to_send = 0;
@@ -959,7 +955,7 @@ static int wacom_24hdt_irq(struct wacom_wac *wacom)
static int wacom_mt_touch(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
- char *data = wacom->data;
+ unsigned char *data = wacom->data;
int i;
int current_num_contacts = data[2];
int contacts_to_send = 0;
@@ -1038,7 +1034,7 @@ static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
{
- char *data = wacom->data;
+ unsigned char *data = wacom->data;
struct input_dev *input = wacom->input;
bool prox;
int x = 0, y = 0;
@@ -1074,10 +1070,8 @@ static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
static int wacom_tpc_pen(struct wacom_wac *wacom)
{
- struct wacom_features *features = &wacom->features;
- char *data = wacom->data;
+ unsigned char *data = wacom->data;
struct input_dev *input = wacom->input;
- int pressure;
bool prox = data[1] & 0x20;
if (!wacom->shared->stylus_in_proximity) /* first in prox */
@@ -1093,10 +1087,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
- pressure = ((data[7] & 0x01) << 8) | data[6];
- if (pressure < 0)
- pressure = features->pressure_max + pressure + 1;
- input_report_abs(input, ABS_PRESSURE, pressure);
+ input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x03) << 8) | data[6]);
input_report_key(input, BTN_TOUCH, data[1] & 0x05);
input_report_key(input, wacom->tool[0], prox);
return 1;
@@ -1107,7 +1098,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
- char *data = wacom->data;
+ unsigned char *data = wacom->data;
dev_dbg(wacom->input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
@@ -1838,7 +1829,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case DTU:
if (features->type == DTUS) {
input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
- for (i = 0; i < 3; i++)
+ for (i = 0; i < 4; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
}
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
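The wacom_wac.c hunks switch the report buffer from char to unsigned char and drop the old negative-pressure fixup; with a signed buffer, a data byte with its high bit set sign-extends and poisons the OR. A short demonstration of the difference (0xC8 and the set 0x01 pressure-MSB bit are example values):

#include <stdio.h>

int main(void)
{
        /* A raw report byte with the high bit set, e.g. pressure LSB 0xC8. */
        unsigned char raw = 0xC8;

        signed char   s = (signed char)raw;
        unsigned char u = raw;

        /* With a signed buffer the byte sign-extends and the OR goes negative,
         * which is why the old code had to patch the value back up. */
        int pressure_signed   = ((0x01 & 0x01) << 8) | s;   /* 0x100 | 0xffffffc8 */
        int pressure_unsigned = ((0x01 & 0x01) << 8) | u;   /* 0x100 | 0xc8 = 456 */

        printf("signed buffer:   %d\n", pressure_signed);
        printf("unsigned buffer: %d\n", pressure_unsigned);
        return 0;
}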
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 45a06e495ed2..7f8aa981500d 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -425,7 +425,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct ads7846 *ts = dev_get_drvdata(dev); \
- ssize_t v = ads7846_read12_ser(dev, \
+ ssize_t v = ads7846_read12_ser(&ts->spi->dev, \
READ_12BIT_SER(var)); \
if (v < 0) \
return v; \
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8b89e33a89fe..647c3c7fd742 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1381,7 +1381,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
do {
next = pmd_addr_end(addr, end);
- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+ ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
prot, stage);
phys += next - addr;
} while (pmd++, addr = next, addr < end);
@@ -1499,7 +1499,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
- return ret ? ret : size;
+ return ret ? 0 : size;
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index f445c10df8df..39f8b717fe84 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -152,7 +152,8 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
info->seg = pci_domain_nr(dev->bus);
info->level = level;
if (event == BUS_NOTIFY_ADD_DEVICE) {
- for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
+ for (tmp = dev; tmp; tmp = tmp->bus->self) {
+ level--;
info->path[level].device = PCI_SLOT(tmp->devfn);
info->path[level].function = PCI_FUNC(tmp->devfn);
if (pci_is_root_bus(tmp->bus))
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 69fa7da5e48b..f256ffc02e29 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1009,11 +1009,13 @@ static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
if (level == 1)
return freelist;
- for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
+ pte = page_address(pg);
+ do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
freelist = dma_pte_list_pagetables(domain, level - 1,
pte, freelist);
- }
+ pte++;
+ } while (!first_pte_in_page(pte));
return freelist;
}
@@ -2235,7 +2237,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
bridge_devfn = dev_tmp->devfn;
}
spin_lock_irqsave(&device_domain_lock, flags);
- info = dmar_search_domain_by_dev_info(segment, bus, devfn);
+ info = dmar_search_domain_by_dev_info(segment,
+ bridge_bus,
+ bridge_devfn);
if (info) {
iommu = info->iommu;
domain = info->domain;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 41be897df8d5..3899ba7821c5 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -41,6 +41,7 @@
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
#define ARMADA_375_PPI_CAUSE (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
struct msi_desc *desc)
{
struct msi_msg msg;
- irq_hw_number_t hwirq;
- int virq;
+ int virq, hwirq;
hwirq = armada_370_xp_alloc_msi();
if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
+ unsigned long hwirq = d->hwirq;
+
irq_dispose_mapping(irq);
- armada_370_xp_free_msi(d->hwirq);
+ armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+ int nvec, int type)
+{
+ /* We support MSI, but not MSI-X */
+ if (type == PCI_CAP_ID_MSI)
+ return 0;
+ return -EINVAL;
}
static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+ msi_chip->check_device = armada_370_xp_check_msi_device;
msi_chip->of_node = node;
armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
- unsigned long reg;
- unsigned long new_mask = 0;
- unsigned long online_mask = 0;
- unsigned long count = 0;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long reg, mask;
int cpu;
- for_each_cpu(cpu, mask_val) {
- new_mask |= 1 << cpu_logical_map(cpu);
- count++;
- }
-
- /*
- * Forbid mutlicore interrupt affinity
- * This is required since the MPIC HW doesn't limit
- * several CPUs from acknowledging the same interrupt.
- */
- if (count > 1)
- return -EINVAL;
-
- for_each_cpu(cpu, cpu_online_mask)
- online_mask |= 1 << cpu_logical_map(cpu);
+ /* Select a single core from the affinity mask which is online */
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ mask = 1UL << cpu_logical_map(cpu);
raw_spin_lock(&irq_controller_lock);
-
reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- reg = (reg & (~online_mask)) | new_mask;
+ reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
raw_spin_unlock(&irq_controller_lock);
return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#ifdef CONFIG_SMP
armada_xp_mpic_smp_cpu_init();
-
- /*
- * Set the default affinity from all CPUs to the boot cpu.
- * This is required since the MPIC doesn't limit several CPUs
- * from acknowledging the same interrupt.
- */
- cpumask_clear(irq_default_affinity);
- cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
#endif
armada_370_xp_msi_init(node, main_int_res.start);
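The armada-370-xp affinity path now picks one online CPU from the requested mask and read-modify-writes only the per-interrupt CPU nibble instead of the whole online mask. A minimal sketch of that selection and masking, with made-up register contents and plain bit twiddling standing in for the cpumask helpers:

#include <stdio.h>
#include <stdint.h>

#define SOURCE_CPU_MASK 0xF     /* low nibble of the per-IRQ source control reg */

/* Pick the lowest CPU that is both requested and online, return its bit. */
static uint32_t pick_cpu_bit(uint32_t requested, uint32_t online)
{
        uint32_t both = requested & online;

        return both ? both & -both : 0;     /* isolate the lowest set bit */
}

int main(void)
{
        uint32_t reg = 0x12345008;          /* pretend register: CPU3 currently set */
        uint32_t bit = pick_cpu_bit(/*requested*/ 0x6, /*online*/ 0xD);

        /* Read-modify-write only the CPU nibble, as the driver now does. */
        reg = (reg & ~SOURCE_CPU_MASK) | bit;
        printf("new reg = 0x%08x (cpu bit 0x%x)\n", reg, bit);
        return 0;
}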
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index fc817d28d1fe..3d15d16a7088 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
int i, size, max, reserved = 0, entry;
const __be32 *irqsr;
- cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4300b6606f5e..57d165e026f4 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -246,10 +246,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
- unsigned int shift = (gic_irq(d) % 4) * 8;
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
u32 val, mask, bit;
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1bf4a71919ec..9380be7b1895 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
} else {
inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
!is_dirty(cache, lookup_result.cblock))
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 53728be84dee..13abade76ad9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -232,6 +232,13 @@ struct thin_c {
struct bio_list deferred_bio_list;
struct bio_list retry_on_resume_list;
struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+ /*
+ * Ensures the thin is not destroyed until the worker has finished
+ * iterating the active_thins list.
+ */
+ atomic_t refcount;
+ struct completion can_destroy;
};
/*----------------------------------------------------------------*/
@@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
blk_finish_plug(&plug);
}
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block. So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+ struct thin_c *tc = NULL;
+
+ rcu_read_lock();
+ if (!list_empty(&pool->active_thins)) {
+ tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ thin_get(tc);
+ }
+ rcu_read_unlock();
+
+ return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+ struct thin_c *old_tc = tc;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+ thin_get(tc);
+ thin_put(old_tc);
+ rcu_read_unlock();
+ return tc;
+ }
+ thin_put(old_tc);
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
@@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool)
struct bio_list bios;
struct thin_c *tc;
- rcu_read_lock();
- list_for_each_entry_rcu(tc, &pool->active_thins, list)
+ tc = get_first_thin(pool);
+ while (tc) {
process_thin_deferred_bios(tc);
- rcu_read_unlock();
+ tc = get_next_thin(pool, tc);
+ }
/*
* If there are any deferred flush bios, we must commit
@@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{
struct noflush_work w;
- INIT_WORK(&w.worker, fn);
+ INIT_WORK_ONSTACK(&w.worker, fn);
w.tc = tc;
atomic_set(&w.complete, 0);
init_waitqueue_head(&w.wait);
@@ -3061,11 +3108,25 @@ static struct target_type pool_target = {
/*----------------------------------------------------------------
* Thin target methods
*--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+ atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+ if (atomic_dec_and_test(&tc->refcount))
+ complete(&tc->can_destroy);
+}
+
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
unsigned long flags;
+ thin_put(tc);
+ wait_for_completion(&tc->can_destroy);
+
spin_lock_irqsave(&tc->pool->lock, flags);
list_del_rcu(&tc->list);
spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct thin_c *tc;
struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
+ unsigned long flags;
mutex_lock(&dm_thin_pool_table.mutex);
@@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
mutex_unlock(&dm_thin_pool_table.mutex);
- spin_lock(&tc->pool->lock);
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
- spin_unlock(&tc->pool->lock);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
* This synchronize_rcu() call is needed here otherwise we risk a
* wake_worker() call finding no bios to process (because the newly
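The dm-thin refcount/completion pair lets the worker pin a thin_c while it iterates active_thins outside the RCU read lock, and thin_dtr() only proceeds once the last reference is dropped. A stripped-down sketch of just the reference-count gate, using C11 atomics and a plain flag where the kernel uses a completion (no RCU or blocking shown):

#include <stdatomic.h>
#include <stdio.h>

/* Per-device state: dropping the last reference signals "safe to destroy". */
struct thin {
        atomic_int refcount;
        int can_destroy;        /* complete(&tc->can_destroy) in the kernel */
};

static void thin_get(struct thin *tc) { atomic_fetch_add(&tc->refcount, 1); }

static void thin_put(struct thin *tc)
{
        if (atomic_fetch_sub(&tc->refcount, 1) == 1)
                tc->can_destroy = 1;
}

int main(void)
{
        struct thin tc = { .refcount = 1, .can_destroy = 0 };  /* as in thin_ctr() */

        thin_get(&tc);          /* worker pins the device while iterating */
        thin_put(&tc);          /* destructor drops the constructor's reference */
        printf("after dtr ref drop:    can_destroy=%d\n", tc.can_destroy); /* 0 */
        thin_put(&tc);          /* worker is finished with it */
        printf("after worker ref drop: can_destroy=%d\n", tc.can_destroy); /* 1 */
        return 0;
}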
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 796007a5e0e1..7a7bab8947ae 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -330,15 +330,17 @@ test_block_hash:
return r;
}
}
-
todo = 1 << v->data_dev_block_bits;
- while (io->iter.bi_size) {
+ do {
u8 *page;
+ unsigned len;
struct bio_vec bv = bio_iter_iovec(bio, io->iter);
page = kmap_atomic(bv.bv_page);
- r = crypto_shash_update(desc, page + bv.bv_offset,
- bv.bv_len);
+ len = bv.bv_len;
+ if (likely(len >= todo))
+ len = todo;
+ r = crypto_shash_update(desc, page + bv.bv_offset, len);
kunmap_atomic(page);
if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
return r;
}
- bio_advance_iter(bio, &io->iter, bv.bv_len);
- }
+ bio_advance_iter(bio, &io->iter, len);
+ todo -= len;
+ } while (todo);
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
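The dm-verity fix bounds each per-segment hash update by the bytes still owed for the current data block, so a bio_vec that spans past the block boundary is no longer hashed in full. A small sketch of that bounded walk, with a byte-counting stand-in for crypto_shash_update() and example segment sizes:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for crypto_shash_update(): just count the bytes we were fed. */
static size_t hashed;
static void hash_update(const unsigned char *p, size_t len) { (void)p; hashed += len; }

int main(void)
{
        /* Scattered segments, as in a bio: 300 + 4000 + 4000 bytes available. */
        unsigned char seg0[300], seg1[4000], seg2[4000];
        struct { unsigned char *p; size_t len; } vec[] = {
                { seg0, sizeof(seg0) }, { seg1, sizeof(seg1) }, { seg2, sizeof(seg2) }
        };
        size_t todo = 4096;     /* hash exactly one 4 KiB data block, no more */
        size_t i = 0, off = 0;

        do {
                size_t len = vec[i].len - off;

                if (len > todo)                 /* never hash past the block */
                        len = todo;
                hash_update(vec[i].p + off, len);
                todo -= len;
                off += len;
                if (off == vec[i].len) {        /* advance to the next segment */
                        i++;
                        off = 0;
                }
        } while (todo);

        printf("hashed %zu bytes\n", hashed);   /* 4096, not 8300 */
        return 0;
}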
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index c137abfa0c54..20f1655e6d75 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -56,7 +56,7 @@ config VIDEO_VIU
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C && DMADEVICES
+ depends on MFD_TIMBERDALE && VIDEO_V4L2 && I2C && DMADEVICES
select DMA_ENGINE
select TIMB_DMA
select VIDEO_ADV7180
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 7ff473c871a9..8d659e6a1b4c 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
* Create one workqueue per volume (per registered block device).
* Remember workqueues are cheap, they're not threads.
*/
- dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+ dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
if (!dev->wq)
goto out_free_queue;
INIT_WORK(&dev->work, ubiblock_do_work);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 02317c1c0238..0f3425dac910 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
rb_erase(&e->u.rb, &ubi->free);
return e;
@@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
peb = __wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
+ if (peb < 0)
+ return peb;
+
err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
@@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* Give the unused PEB back */
wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
goto out_cancel;
}
self_check_in_wl_tree(ubi, e1, &ubi->used);
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 5a59b85cdfc2..39c4d8d61074 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -362,7 +362,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.min_links);
+ return sprintf(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR,
bonding_show_min_links, bonding_sysfs_store_option);
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 61ffc12d8fd8..8ab7103d4f44 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,6 +14,13 @@ config CAN_C_CAN_PLATFORM
SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
boards like am335x, dm814x, dm813x and dm811x.
+config CAN_C_CAN_STRICT_FRAME_ORDERING
+ bool "Force a strict RX CAN frame order (may cause frame loss)"
+ ---help---
+ The RX split buffer prevents packet reordering but can cause packet
+	  loss. Only enable this option if you are willing to lose CAN frames
+	  in favour of receiving them in the correct order.
+
config CAN_C_CAN_PCI
tristate "Generic PCI Bus based C_CAN/D_CAN driver"
depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a5c8dcfa8357..a2ca820b5373 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -60,6 +60,8 @@
#define CONTROL_IE BIT(1)
#define CONTROL_INIT BIT(0)
+#define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
+
/* test register */
#define TEST_RX BIT(7)
#define TEST_TX1 BIT(6)
@@ -108,11 +110,14 @@
#define IF_COMM_CONTROL BIT(4)
#define IF_COMM_CLR_INT_PND BIT(3)
#define IF_COMM_TXRQST BIT(2)
+#define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST
#define IF_COMM_DATAA BIT(1)
#define IF_COMM_DATAB BIT(0)
-#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
- IF_COMM_CONTROL | IF_COMM_TXRQST | \
- IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* TX buffer setup */
+#define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \
+ IF_COMM_TXRQST | \
+ IF_COMM_DATAA | IF_COMM_DATAB)
/* For the low buffers we clear the interrupt bit, but keep newdat */
#define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \
@@ -120,12 +125,19 @@
IF_COMM_DATAA | IF_COMM_DATAB)
/* For the high buffers we clear the interrupt bit and newdat */
-#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST)
+#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
+
+
+/* Receive setup of message objects */
+#define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL)
+
+/* Invalidation of message objects */
+#define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL)
/* IFx arbitration */
-#define IF_ARB_MSGVAL BIT(15)
-#define IF_ARB_MSGXTD BIT(14)
-#define IF_ARB_TRANSMIT BIT(13)
+#define IF_ARB_MSGVAL BIT(31)
+#define IF_ARB_MSGXTD BIT(30)
+#define IF_ARB_TRANSMIT BIT(29)
/* IFx message control */
#define IF_MCONT_NEWDAT BIT(15)
@@ -139,19 +151,17 @@
#define IF_MCONT_EOB BIT(7)
#define IF_MCONT_DLC_MASK 0xf
+#define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK)
+#define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB)
+
+#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB)
+
/*
* Use IF1 for RX and IF2 for TX
*/
#define IF_RX 0
#define IF_TX 1
-/* status interrupt */
-#define STATUS_INTERRUPT 0x8000
-
-/* global interrupt masks */
-#define ENABLE_ALL_INTERRUPTS 1
-#define DISABLE_ALL_INTERRUPTS 0
-
/* minimum timeout for checking BUSY status */
#define MIN_TIMEOUT_VALUE 6
@@ -171,6 +181,7 @@ enum c_can_lec_type {
LEC_BIT0_ERROR,
LEC_CRC_ERROR,
LEC_UNUSED,
+ LEC_MASK = LEC_UNUSED,
};
/*
@@ -226,143 +237,115 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
priv->raminit(priv, enable);
}
-static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+static void c_can_irq_control(struct c_can_priv *priv, bool enable)
{
- return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
- C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static inline int get_tx_echo_msg_obj(int txecho)
-{
- return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
-}
-
-static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
-{
- u32 val = priv->read_reg(priv, index);
- val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
- return val;
-}
-
-static void c_can_enable_all_interrupts(struct c_can_priv *priv,
- int enable)
-{
- unsigned int cntrl_save = priv->read_reg(priv,
- C_CAN_CTRL_REG);
+ u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
if (enable)
- cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
- else
- cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+ ctrl |= CONTROL_IRQMSK;
- priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
+ priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
}
-static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj)
{
- int count = MIN_TIMEOUT_VALUE;
+ struct c_can_priv *priv = netdev_priv(dev);
+ int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
+
+ priv->write_reg(priv, reg + 1, cmd);
+ priv->write_reg(priv, reg, obj);
- while (count && priv->read_reg(priv,
- C_CAN_IFACE(COMREQ_REG, iface)) &
- IF_COMR_BUSY) {
- count--;
+ for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
+ if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
+ return;
udelay(1);
}
+ netdev_err(dev, "Updating object timed out\n");
- if (!count)
- return 1;
+}
- return 0;
+static inline void c_can_object_get(struct net_device *dev, int iface,
+ u32 obj, u32 cmd)
+{
+ c_can_obj_update(dev, iface, cmd, obj);
}
-static inline void c_can_object_get(struct net_device *dev,
- int iface, int objno, int mask)
+static inline void c_can_object_put(struct net_device *dev, int iface,
+ u32 obj, u32 cmd)
{
- struct c_can_priv *priv = netdev_priv(dev);
+ c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj);
+}
- /*
- * As per specs, after writting the message object number in the
- * IF command request register the transfer b/w interface
- * register and message RAM must be complete in 6 CAN-CLK
- * period.
- */
- priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
- IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
- IFX_WRITE_LOW_16BIT(objno));
+/*
+ * Note: According to the documentation, clearing TXIE while MSGVAL is set
+ * is not allowed, but it works nicely on C/DCAN and lowers the I/O
+ * load significantly.
+ */
+static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
- if (c_can_msg_obj_is_busy(priv, iface))
- netdev_err(dev, "timed out in object get\n");
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
+ c_can_object_put(dev, iface, obj, IF_COMM_INVAL);
}
-static inline void c_can_object_put(struct net_device *dev,
- int iface, int objno, int mask)
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj)
{
struct c_can_priv *priv = netdev_priv(dev);
- /*
- * As per specs, after writting the message object number in the
- * IF command request register the transfer b/w interface
- * register and message RAM must be complete in 6 CAN-CLK
- * period.
- */
- priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
- (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
- priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
- IFX_WRITE_LOW_16BIT(objno));
-
- if (c_can_msg_obj_is_busy(priv, iface))
- netdev_err(dev, "timed out in object put\n");
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+ c_can_inval_tx_object(dev, iface, obj);
}
-static void c_can_write_msg_object(struct net_device *dev,
- int iface, struct can_frame *frame, int objno)
+static void c_can_setup_tx_object(struct net_device *dev, int iface,
+ struct can_frame *frame, int idx)
{
- int i;
- u16 flags = 0;
- unsigned int id;
struct c_can_priv *priv = netdev_priv(dev);
-
- if (!(frame->can_id & CAN_RTR_FLAG))
- flags |= IF_ARB_TRANSMIT;
+ u16 ctrl = IF_MCONT_TX | frame->can_dlc;
+ bool rtr = frame->can_id & CAN_RTR_FLAG;
+ u32 arb = IF_ARB_MSGVAL;
+ int i;
if (frame->can_id & CAN_EFF_FLAG) {
- id = frame->can_id & CAN_EFF_MASK;
- flags |= IF_ARB_MSGXTD;
- } else
- id = ((frame->can_id & CAN_SFF_MASK) << 18);
+ arb |= frame->can_id & CAN_EFF_MASK;
+ arb |= IF_ARB_MSGXTD;
+ } else {
+ arb |= (frame->can_id & CAN_SFF_MASK) << 18;
+ }
+
+ if (!rtr)
+ arb |= IF_ARB_TRANSMIT;
- flags |= IF_ARB_MSGVAL;
+ /*
+ * If we change the DIR bit, we need to invalidate the buffer
+ * first, i.e. clear the MSGVAL flag in the arbiter.
+ */
+ if (rtr != (bool)test_bit(idx, &priv->tx_dir)) {
+ u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+ c_can_inval_msg_object(dev, iface, obj);
+ change_bit(idx, &priv->tx_dir);
+ }
+
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
- IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
- IFX_WRITE_HIGH_16BIT(id));
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
for (i = 0; i < frame->can_dlc; i += 2) {
priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] | (frame->data[i + 1] << 8));
}
-
- /* enable interrupt for this message object */
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
- frame->can_dlc);
- c_can_object_put(dev, iface, objno, IF_COMM_ALL);
}
static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
- int iface,
- int ctrl_mask)
+ int iface)
{
int i;
- struct c_can_priv *priv = netdev_priv(dev);
- for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
- ctrl_mask & ~IF_MCONT_NEWDAT);
- c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
- }
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
+ c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
}
static int c_can_handle_lost_msg_obj(struct net_device *dev,
@@ -377,6 +360,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
/* create an error msg */
skb = alloc_can_err_skb(dev, &frame);
if (unlikely(!skb))
@@ -384,22 +370,18 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev,
frame->can_id |= CAN_ERR_CRTL;
frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_errors++;
- stats->rx_over_errors++;
netif_receive_skb(skb);
return 1;
}
-static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
{
- u16 flags, data;
- int i;
- unsigned int val;
- struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- struct sk_buff *skb;
+ struct c_can_priv *priv = netdev_priv(dev);
struct can_frame *frame;
+ struct sk_buff *skb;
+ u32 arb, data;
skb = alloc_can_skb(dev, &frame);
if (!skb) {
@@ -409,115 +391,82 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
- val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
- (flags << 16);
+ arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
+ arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
- if (flags & IF_ARB_MSGXTD)
- frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ if (arb & IF_ARB_MSGXTD)
+ frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
- frame->can_id = (val >> 18) & CAN_SFF_MASK;
+ frame->can_id = (arb >> 18) & CAN_SFF_MASK;
- if (flags & IF_ARB_TRANSMIT)
+ if (arb & IF_ARB_TRANSMIT) {
frame->can_id |= CAN_RTR_FLAG;
- else {
- for (i = 0; i < frame->can_dlc; i += 2) {
- data = priv->read_reg(priv,
- C_CAN_IFACE(DATA1_REG, iface) + i / 2);
+ } else {
+ int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
+ data = priv->read_reg(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
}
- netif_receive_skb(skb);
-
stats->rx_packets++;
stats->rx_bytes += frame->can_dlc;
+
+ netif_receive_skb(skb);
return 0;
}
static void c_can_setup_receive_object(struct net_device *dev, int iface,
- int objno, unsigned int mask,
- unsigned int id, unsigned int mcont)
+ u32 obj, u32 mask, u32 id, u32 mcont)
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
- IFX_WRITE_LOW_16BIT(mask));
-
- /* According to C_CAN documentation, the reserved bit
- * in IFx_MASK2 register is fixed 1
- */
- priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
- IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
+ mask |= BIT(29);
+ priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
+ priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
- IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
- (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+ id |= IF_ARB_MSGVAL;
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
- c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
-
- netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
-{
- struct c_can_priv *priv = netdev_priv(dev);
-
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
- priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
-
- c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
-
- netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
-}
-
-static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
-{
- int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
-
- /*
- * as transmission request register's bit n-1 corresponds to
- * message object n, we need to handle the same properly.
- */
- if (val & (1 << (objno - 1)))
- return 1;
-
- return 0;
+ c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
}
static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
- u32 msg_obj_no;
- struct c_can_priv *priv = netdev_priv(dev);
struct can_frame *frame = (struct can_frame *)skb->data;
+ struct c_can_priv *priv = netdev_priv(dev);
+ u32 idx, obj;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
-
- spin_lock_bh(&priv->xmit_lock);
- msg_obj_no = get_tx_next_msg_obj(priv);
-
- /* prepare message object for transmission */
- c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
- priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
- can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
-
/*
- * we have to stop the queue in case of a wrap around or
- * if the next TX message object is still in use
+ * This is not a FIFO. C/D_CAN sends out the buffers
+ * prioritized. The lowest buffer number wins.
*/
- priv->tx_next++;
- if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
- (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+ idx = fls(atomic_read(&priv->tx_active));
+ obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+
+ /* If this is the last buffer, stop the xmit queue */
+ if (idx == C_CAN_MSG_OBJ_TX_NUM - 1)
netif_stop_queue(dev);
- spin_unlock_bh(&priv->xmit_lock);
+ /*
+ * Store the message in the interface so we can call
+ * can_put_echo_skb(). We must do this before we enable
+ * transmit as we might race against do_tx().
+ */
+ c_can_setup_tx_object(dev, IF_TX, frame, idx);
+ priv->dlc[idx] = frame->can_dlc;
+ can_put_echo_skb(skb, dev, idx);
+
+ /* Update the active bits */
+ atomic_add((1 << idx), &priv->tx_active);
+ /* Start transmission */
+ c_can_object_put(dev, IF_TX, obj, IF_COMM_TX);
return NETDEV_TX_OK;
}
@@ -594,11 +543,10 @@ static void c_can_configure_msg_objects(struct net_device *dev)
/* setup receive message objects */
for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
- c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
- (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+ c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV);
c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
- IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+ IF_MCONT_RCV_EOB);
}
/*
@@ -612,30 +560,22 @@ static int c_can_chip_config(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
/* enable automatic retransmission */
- priv->write_reg(priv, C_CAN_CTRL_REG,
- CONTROL_ENABLE_AR);
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
/* loopback + silent mode : useful for hot self-test */
- priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
- CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, C_CAN_TEST_REG,
- TEST_LBACK | TEST_SILENT);
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* loopback mode : useful for self-test function */
- priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
- CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
/* silent mode : bus-monitoring mode */
- priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
- CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
- } else
- /* normal mode*/
- priv->write_reg(priv, C_CAN_CTRL_REG,
- CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+ }
/* configure message objects */
c_can_configure_msg_objects(dev);
@@ -643,6 +583,11 @@ static int c_can_chip_config(struct net_device *dev)
/* set a `lec` value so that we can check for updates later */
priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
+ /* Clear all internal status */
+ atomic_set(&priv->tx_active, 0);
+ priv->rxmasked = 0;
+ priv->tx_dir = 0;
+
/* set bittiming params */
return c_can_set_bittiming(dev);
}
@@ -657,13 +602,11 @@ static int c_can_start(struct net_device *dev)
if (err)
return err;
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- /* reset tx helper pointers */
- priv->tx_next = priv->tx_echo = 0;
+ /* Setup the command for new messages */
+ priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
+ IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
- /* enable status change, error and module interrupts */
- c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
}
@@ -672,15 +615,13 @@ static void c_can_stop(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
- /* disable all interrupts */
- c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
-
- /* set the state as STOPPED */
+ c_can_irq_control(priv, false);
priv->can.state = CAN_STATE_STOPPED;
}
static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
{
+ struct c_can_priv *priv = netdev_priv(dev);
int err;
switch (mode) {
@@ -689,6 +630,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
if (err)
return err;
netif_wake_queue(dev);
+ c_can_irq_control(priv, true);
break;
default:
return -EOPNOTSUPP;
@@ -724,42 +666,29 @@ static int c_can_get_berr_counter(const struct net_device *dev,
return err;
}
-/*
- * priv->tx_echo holds the number of the oldest can_frame put for
- * transmission into the hardware, but not yet ACKed by the CAN tx
- * complete IRQ.
- *
- * We iterate from priv->tx_echo to priv->tx_next and check if the
- * packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted packet, stop looking for more.
- */
static void c_can_do_tx(struct net_device *dev)
{
struct c_can_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- u32 val, obj, pkts = 0, bytes = 0;
-
- spin_lock_bh(&priv->xmit_lock);
+ u32 idx, obj, pkts = 0, bytes = 0, pend, clr;
- for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
- obj = get_tx_echo_msg_obj(priv->tx_echo);
- val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
+ clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG);
- if (val & (1 << (obj - 1)))
- break;
-
- can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
- bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
+ while ((idx = ffs(pend))) {
+ idx--;
+ pend &= ~(1 << idx);
+ obj = idx + C_CAN_MSG_OBJ_TX_FIRST;
+ c_can_inval_tx_object(dev, IF_RX, obj);
+ can_get_echo_skb(dev, idx);
+ bytes += priv->dlc[idx];
pkts++;
- c_can_inval_msg_object(dev, IF_TX, obj);
}
- /* restart queue if wrap-up or if queue stalled on last pkt */
- if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
- ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
- netif_wake_queue(dev);
+ /* Clear the bits in the tx_active mask */
+ atomic_sub(clr, &priv->tx_active);
- spin_unlock_bh(&priv->xmit_lock);
+ if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1)))
+ netif_wake_queue(dev);
if (pkts) {
stats->tx_bytes += bytes;
@@ -800,18 +729,42 @@ static u32 c_can_adjust_pending(u32 pend)
return pend & ~((1 << lasts) - 1);
}
+static inline void c_can_rx_object_get(struct net_device *dev,
+ struct c_can_priv *priv, u32 obj)
+{
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+ if (obj < C_CAN_MSG_RX_LOW_LAST)
+ c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
+ else
+#endif
+ c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
+}
+
+static inline void c_can_rx_finalize(struct net_device *dev,
+ struct c_can_priv *priv, u32 obj)
+{
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+ if (obj < C_CAN_MSG_RX_LOW_LAST)
+ priv->rxmasked |= BIT(obj - 1);
+ else if (obj == C_CAN_MSG_RX_LOW_LAST) {
+ priv->rxmasked = 0;
+ /* activate all lower message objects */
+ c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
+ }
+#endif
+ if (priv->type != BOSCH_D_CAN)
+ c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
+}
+
static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
u32 pend, int quota)
{
- u32 pkts = 0, ctrl, obj, mcmd;
+ u32 pkts = 0, ctrl, obj;
while ((obj = ffs(pend)) && quota > 0) {
pend &= ~BIT(obj - 1);
- mcmd = obj < C_CAN_MSG_RX_LOW_LAST ?
- IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
-
- c_can_object_get(dev, IF_RX, obj, mcmd);
+ c_can_rx_object_get(dev, priv, obj);
ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
if (ctrl & IF_MCONT_MSGLST) {
@@ -833,9 +786,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
/* read the data from the message object */
c_can_read_msg_object(dev, IF_RX, ctrl);
- if (obj == C_CAN_MSG_RX_LOW_LAST)
- /* activate all lower message objects */
- c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl);
+ c_can_rx_finalize(dev, priv, obj);
pkts++;
quota--;
@@ -844,6 +795,16 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
return pkts;
}
+static inline u32 c_can_get_pending(struct c_can_priv *priv)
+{
+ u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
+
+#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
+ pend &= ~priv->rxmasked;
+#endif
+ return pend;
+}
+
/*
* theory of operation:
*
@@ -853,6 +814,8 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
* has arrived. To work-around this issue, we keep two groups of message
* objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
*
+ * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
+ *
* To ensure in-order frame reception we use the following
* approach while re-activating a message object to receive further
* frames:
@@ -865,6 +828,14 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
* - if the current message object number is greater than
* C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
* only this message object.
+ *
+ * This can cause packet loss!
+ *
+ * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
+ *
+ * We clear the newdat bit right away.
+ *
+ * This can result in packet reordering when the readout is slow.
*/
static int c_can_do_rx_poll(struct net_device *dev, int quota)
{
@@ -880,7 +851,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
while (quota > 0) {
if (!pend) {
- pend = priv->read_reg(priv, C_CAN_INTPND1_REG);
+ pend = c_can_get_pending(priv);
if (!pend)
break;
/*
@@ -905,12 +876,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
return pkts;
}
-static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
-{
- return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
- (priv->current_status & LEC_UNUSED);
-}
-
static int c_can_handle_state_change(struct net_device *dev,
enum c_can_bus_error_types error_type)
{
@@ -922,6 +887,26 @@ static int c_can_handle_state_change(struct net_device *dev,
struct sk_buff *skb;
struct can_berr_counter bec;
+ switch (error_type) {
+ case C_CAN_ERROR_WARNING:
+ /* error warning state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ break;
+ case C_CAN_ERROR_PASSIVE:
+ /* error passive state */
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case C_CAN_BUS_OFF:
+ /* bus-off state */
+ priv->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(dev);
+ break;
+ default:
+ break;
+ }
+
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
@@ -935,8 +920,6 @@ static int c_can_handle_state_change(struct net_device *dev,
switch (error_type) {
case C_CAN_ERROR_WARNING:
/* error warning state */
- priv->can.can_stats.error_warning++;
- priv->can.state = CAN_STATE_ERROR_WARNING;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
@@ -947,8 +930,6 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
case C_CAN_ERROR_PASSIVE:
/* error passive state */
- priv->can.can_stats.error_passive++;
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
if (rx_err_passive)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -960,22 +941,16 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
case C_CAN_BUS_OFF:
/* bus-off state */
- priv->can.state = CAN_STATE_BUS_OFF;
cf->can_id |= CAN_ERR_BUSOFF;
- /*
- * disable all interrupts in bus-off mode to ensure that
- * the CPU is not hogged down
- */
- c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
can_bus_off(dev);
break;
default:
break;
}
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
return 1;
}
@@ -996,6 +971,13 @@ static int c_can_handle_bus_err(struct net_device *dev,
if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
return 0;
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ return 0;
+
+ /* common for all type of bus errors */
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
/* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
@@ -1005,10 +987,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
* check for 'last error code' which tells us the
* type of the last error to occur on the CAN bus
*/
-
- /* common for all type of bus errors */
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
cf->data[2] |= CAN_ERR_PROT_UNSPEC;
@@ -1043,95 +1021,64 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
}
- /* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
-
- netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
-
+ netif_receive_skb(skb);
return 1;
}
static int c_can_poll(struct napi_struct *napi, int quota)
{
- u16 irqstatus;
- int lec_type = 0;
- int work_done = 0;
struct net_device *dev = napi->dev;
struct c_can_priv *priv = netdev_priv(dev);
+ u16 curr, last = priv->last_status;
+ int work_done = 0;
- irqstatus = priv->irqstatus;
- if (!irqstatus)
- goto end;
+ priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
+ /* Ack status on C_CAN. D_CAN is self clearing */
+ if (priv->type != BOSCH_D_CAN)
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
- /* status events have the highest priority */
- if (irqstatus == STATUS_INTERRUPT) {
- priv->current_status = priv->read_reg(priv,
- C_CAN_STS_REG);
-
- /* handle Tx/Rx events */
- if (priv->current_status & STATUS_TXOK)
- priv->write_reg(priv, C_CAN_STS_REG,
- priv->current_status & ~STATUS_TXOK);
-
- if (priv->current_status & STATUS_RXOK)
- priv->write_reg(priv, C_CAN_STS_REG,
- priv->current_status & ~STATUS_RXOK);
-
- /* handle state changes */
- if ((priv->current_status & STATUS_EWARN) &&
- (!(priv->last_status & STATUS_EWARN))) {
- netdev_dbg(dev, "entered error warning state\n");
- work_done += c_can_handle_state_change(dev,
- C_CAN_ERROR_WARNING);
- }
- if ((priv->current_status & STATUS_EPASS) &&
- (!(priv->last_status & STATUS_EPASS))) {
- netdev_dbg(dev, "entered error passive state\n");
- work_done += c_can_handle_state_change(dev,
- C_CAN_ERROR_PASSIVE);
- }
- if ((priv->current_status & STATUS_BOFF) &&
- (!(priv->last_status & STATUS_BOFF))) {
- netdev_dbg(dev, "entered bus off state\n");
- work_done += c_can_handle_state_change(dev,
- C_CAN_BUS_OFF);
- }
+ /* handle state changes */
+ if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
+ netdev_dbg(dev, "entered error warning state\n");
+ work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
+ }
- /* handle bus recovery events */
- if ((!(priv->current_status & STATUS_BOFF)) &&
- (priv->last_status & STATUS_BOFF)) {
- netdev_dbg(dev, "left bus off state\n");
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
- if ((!(priv->current_status & STATUS_EPASS)) &&
- (priv->last_status & STATUS_EPASS)) {
- netdev_dbg(dev, "left error passive state\n");
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
+ if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
+ netdev_dbg(dev, "entered error passive state\n");
+ work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
+ }
+
+ if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
+ netdev_dbg(dev, "entered bus off state\n");
+ work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
+ goto end;
+ }
- priv->last_status = priv->current_status;
-
- /* handle lec errors on the bus */
- lec_type = c_can_has_and_handle_berr(priv);
- if (lec_type)
- work_done += c_can_handle_bus_err(dev, lec_type);
- } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
- (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
- /* handle events corresponding to receive message objects */
- work_done += c_can_do_rx_poll(dev, (quota - work_done));
- } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
- (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
- /* handle events corresponding to transmit message objects */
- c_can_do_tx(dev);
+ /* handle bus recovery events */
+ if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
+ netdev_dbg(dev, "left bus off state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
+ netdev_dbg(dev, "left error passive state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
+ /* handle lec errors on the bus */
+ work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
+
+ /* Handle Tx/Rx events. We do this unconditionally */
+ work_done += c_can_do_rx_poll(dev, (quota - work_done));
+ c_can_do_tx(dev);
+
end:
if (work_done < quota) {
napi_complete(napi);
- /* enable all IRQs */
- c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+ /* enable all IRQs if we are not in bus off state */
+ if (priv->can.state != CAN_STATE_BUS_OFF)
+ c_can_irq_control(priv, true);
}
return work_done;
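The reworked c_can_poll() above snapshots C_CAN_STS_REG once per poll and treats a status bit as a state-change edge when it is set in the freshly read word but clear in the saved priv->last_status. A minimal stand-alone sketch of that comparison; the helper name is illustrative and not part of the driver:

#include <stdbool.h>
#include <stdint.h>

/* True when `bit` is set in the current status but was clear in the
 * previous snapshot -- the 0 -> 1 edge the poll routine tests for
 * EWARN, EPASS and BOFF.
 */
static inline bool status_bit_rose(uint16_t curr, uint16_t last, uint16_t bit)
{
	return (curr & bit) && !(last & bit);
}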
@@ -1142,12 +1089,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
- priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
- if (!priv->irqstatus)
+ if (!priv->read_reg(priv, C_CAN_INT_REG))
return IRQ_NONE;
/* disable all interrupts and schedule the NAPI */
- c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ c_can_irq_control(priv, false);
napi_schedule(&priv->napi);
return IRQ_HANDLED;
@@ -1184,6 +1130,8 @@ static int c_can_open(struct net_device *dev)
can_led_event(dev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
+ /* enable status change, error and module interrupts */
+ c_can_irq_control(priv, true);
netif_start_queue(dev);
return 0;
@@ -1226,7 +1174,6 @@ struct net_device *alloc_c_can_dev(void)
return NULL;
priv = netdev_priv(dev);
- spin_lock_init(&priv->xmit_lock);
netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
priv->dev = dev;
@@ -1281,6 +1228,7 @@ int c_can_power_up(struct net_device *dev)
u32 val;
unsigned long time_out;
struct c_can_priv *priv = netdev_priv(dev);
+ int ret;
if (!(dev->flags & IFF_UP))
return 0;
@@ -1307,7 +1255,11 @@ int c_can_power_up(struct net_device *dev)
if (time_after(jiffies, time_out))
return -ETIMEDOUT;
- return c_can_start(dev);
+ ret = c_can_start(dev);
+ if (!ret)
+ c_can_irq_control(priv, true);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(c_can_power_up);
#endif
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index faa8404162b3..c56f1b1c11ca 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,14 +22,6 @@
#ifndef C_CAN_H
#define C_CAN_H
-/*
- * IFx register masks:
- * allow easy operation on 16-bit registers when the
- * argument is 32-bit instead
- */
-#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
-#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
-
/* message object split */
#define C_CAN_NO_OF_OBJECTS 32
#define C_CAN_MSG_OBJ_RX_NUM 16
@@ -45,8 +37,6 @@
#define C_CAN_MSG_OBJ_RX_SPLIT 9
#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
-
-#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
#define RECEIVE_OBJECT_BITS 0x0000ffff
enum reg {
@@ -183,23 +173,20 @@ struct c_can_priv {
struct napi_struct napi;
struct net_device *dev;
struct device *device;
- spinlock_t xmit_lock;
- int tx_object;
- int current_status;
+ atomic_t tx_active;
+ unsigned long tx_dir;
int last_status;
u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
void __iomem *base;
const u16 *regs;
- unsigned long irq_flags; /* for request_irq() */
- unsigned int tx_next;
- unsigned int tx_echo;
void *priv; /* for board-specific data */
- u16 irqstatus;
enum c_can_dev_id type;
u32 __iomem *raminit_ctrlreg;
- unsigned int instance;
+ int instance;
void (*raminit) (const struct c_can_priv *priv, bool enable);
+ u32 comm_rcv_high;
+ u32 rxmasked;
u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
};
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7ab384f59e7e..58f71e1fcc4e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -115,8 +115,11 @@ static int c_can_pci_probe(struct pci_dev *pdev,
goto out_disable_device;
}
- pci_set_master(pdev);
- pci_enable_msi(pdev);
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(&pdev->dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ }
addr = pci_iomap(pdev, c_can_pci_data->bar,
pci_resource_len(pdev, c_can_pci_data->bar));
@@ -164,6 +167,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
goto out_free_c_can;
}
+ priv->type = c_can_pci_data->type;
+
/* Configure access to registers */
switch (c_can_pci_data->reg_align) {
case C_CAN_REG_ALIGN_32:
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 806d92753427..1df0b322d1e4 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -222,7 +222,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
+ if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
priv->raminit = c_can_hw_raminit;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c7a260478749..e318e87e2bfc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* Check if the CAN device has bit-timing parameters */
if (!btc)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
/*
* Depending on the given can_bittiming parameter structure the CAN
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index df136a2516c4..014695d7e6a3 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
int reg)
{
- unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags, base = (unsigned long)priv->reg_base;
+ u8 readval;
+ spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
outb(reg, base);
- return inb(base + 1);
+ readval = inb(base + 1);
+ spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
+
+ return readval;
}
static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
int reg, u8 val)
{
- unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags, base = (unsigned long)priv->reg_base;
+ spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
outb(reg, base);
outb(val, base + 1);
+ spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
}
static int sja1000_isa_probe(struct platform_device *pdev)
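The indirect access helpers above need two port operations per transfer -- latch the register index at the base port, then touch the data port at base + 1 -- and the new per-device spinlock keeps another CPU from interleaving its own index write between the two. A hedged user-space analogue of the same pattern; every name below is illustrative and none comes from the driver:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t indirect_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint8_t index_port;        /* stand-in for outb(reg, base) */
static volatile uint8_t data_port[256];    /* stand-in for inb(base + 1)   */

static uint8_t indirect_read(uint8_t reg)
{
	uint8_t val;

	pthread_mutex_lock(&indirect_lock);
	index_port = reg;               /* step 1: latch the register index */
	val = data_port[index_port];    /* step 2: read the selected data   */
	pthread_mutex_unlock(&indirect_lock);

	return val;
}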
@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
if (iosize == SJA1000_IOSIZE_INDIRECT) {
priv->read_reg = sja1000_isa_port_read_reg_indirect;
priv->write_reg = sja1000_isa_port_write_reg_indirect;
+ spin_lock_init(&indirect_lock[idx]);
} else {
priv->read_reg = sja1000_isa_port_read_reg;
priv->write_reg = sja1000_isa_port_write_reg;
@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = idx;
err = register_sja1000dev(dev);
if (err) {
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index f5b16e0e3a12..dcf9196f6316 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
return;
- spin_lock(&sl->lock);
+ spin_lock_bh(&sl->lock);
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_unlock(&sl->lock);
+ spin_unlock_bh(&sl->lock);
netif_wake_queue(sl->dev);
return;
}
@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
actual = tty->ops->write(tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
- spin_unlock(&sl->lock);
+ spin_unlock_bh(&sl->lock);
}
/* Send a can_frame to a TTY queue. */
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 39b26fe28d10..d7401017a3f1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
+
+config CX_ECAT
+ tristate "Beckhoff CX5020 EtherCAT master support"
+ depends on PCI
+ ---help---
+ Driver for EtherCAT master module located on CCAT FPGA
+ that can be found on Beckhoff CX5020, and possibly other Beckhoff
+ CX series industrial PCs.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ec_bhf.
+
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 545d0b3b9cb4..35190e36c456 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
+obj-$(CONFIG_CX_ECAT) += ec_bhf.o
obj-$(CONFIG_DM9000) += davicom/
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 80c1ab74a4b8..fdddba51473e 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -1,5 +1,6 @@
config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
+ depends on HAS_DMA
select PHYLIB
---help---
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0e38a03b1542..38c500f95b9e 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -18,6 +18,7 @@
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_msgdmahw.h"
+#include "altera_msgdma.h"
/* No initialization work to do for MSGDMA */
int msgdma_initialize(struct altera_tse_private *priv)
@@ -29,6 +30,10 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
{
}
+void msgdma_start_rxdma(struct altera_tse_private *priv)
+{
+}
+
void msgdma_reset(struct altera_tse_private *priv)
{
int counter;
@@ -151,7 +156,7 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
/* Put buffer to the mSGDMA RX FIFO
*/
-int msgdma_add_rx_desc(struct altera_tse_private *priv,
+void msgdma_add_rx_desc(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
struct msgdma_extended_desc *desc = priv->rx_dma_desc;
@@ -172,7 +177,6 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
iowrite32(0, &desc->burst_seq_num);
iowrite32(0x00010001, &desc->stride);
iowrite32(control, &desc->control);
- return 1;
}
/* status is returned on upper 16 bits,
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
index 7f0f5bf2bba2..42cf61c81057 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.h
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
void msgdma_clear_rxirq(struct altera_tse_private *);
void msgdma_clear_txirq(struct altera_tse_private *);
u32 msgdma_tx_completions(struct altera_tse_private *);
-int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
u32 msgdma_rx_status(struct altera_tse_private *);
int msgdma_initialize(struct altera_tse_private *);
void msgdma_uninitialize(struct altera_tse_private *);
+void msgdma_start_rxdma(struct altera_tse_private *);
#endif /* __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 519f0f0bacf0..dbd40e15b5cc 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,15 +20,15 @@
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"
-static void sgdma_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
- dma_addr_t ndesc_phys,
- dma_addr_t raddr,
- dma_addr_t waddr,
- u16 length,
- int generate_eop,
- int rfixed,
- int wfixed);
+static void sgdma_setup_descrip(struct sgdma_descrip *desc,
+ struct sgdma_descrip *ndesc,
+ dma_addr_t ndesc_phys,
+ dma_addr_t raddr,
+ dma_addr_t waddr,
+ u16 length,
+ int generate_eop,
+ int rfixed,
+ int wfixed);
static int sgdma_async_write(struct altera_tse_private *priv,
struct sgdma_descrip *desc);
@@ -64,11 +64,15 @@ queue_rx_peekhead(struct altera_tse_private *priv);
int sgdma_initialize(struct altera_tse_private *priv)
{
- priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+ priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
+ SGDMA_CTRLREG_INTEN;
priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+ SGDMA_CTRLREG_INTEN |
SGDMA_CTRLREG_ILASTD;
+ priv->sgdmadesclen = sizeof(struct sgdma_descrip);
+
INIT_LIST_HEAD(&priv->txlisthd);
INIT_LIST_HEAD(&priv->rxlisthd);
@@ -93,6 +97,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
return -EINVAL;
}
+ /* Initialize descriptor memory to all 0's, sync memory to cache */
+ memset(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+
+ dma_sync_single_for_device(priv->device, priv->txdescphys,
+ priv->txdescmem, DMA_TO_DEVICE);
+
+ dma_sync_single_for_device(priv->device, priv->rxdescphys,
+ priv->rxdescmem, DMA_TO_DEVICE);
+
return 0;
}
@@ -130,26 +144,23 @@ void sgdma_reset(struct altera_tse_private *priv)
iowrite32(0, &prxsgdma->control);
}
+/* For SGDMA, interrupts remain enabled after initially enabling,
+ * so no need to provide implementations for abstract enable
+ * and disable
+ */
+
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->rx_dma_csr;
- priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
}
void sgdma_enable_txirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->tx_dma_csr;
- priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
}
-/* for SGDMA, RX interrupts remain enabled after enabling */
void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}
-/* for SGDMA, TX interrupts remain enabled after enabling */
void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}
@@ -183,15 +194,15 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
if (sgdma_txbusy(priv))
return 0;
- sgdma_descrip(cdesc, /* current descriptor */
- ndesc, /* next descriptor */
- sgdma_txphysaddr(priv, ndesc),
- buffer->dma_addr, /* address of packet to xmit */
- 0, /* write addr 0 for tx dma */
- buffer->len, /* length of packet */
- SGDMA_CONTROL_EOP, /* Generate EOP */
- 0, /* read fixed */
- SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
+ sgdma_setup_descrip(cdesc, /* current descriptor */
+ ndesc, /* next descriptor */
+ sgdma_txphysaddr(priv, ndesc),
+ buffer->dma_addr, /* address of packet to xmit */
+ 0, /* write addr 0 for tx dma */
+ buffer->len, /* length of packet */
+ SGDMA_CONTROL_EOP, /* Generate EOP */
+ 0, /* read fixed */
+ SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
pktstx = sgdma_async_write(priv, cdesc);
@@ -218,11 +229,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
return ready;
}
-int sgdma_add_rx_desc(struct altera_tse_private *priv,
- struct tse_buffer *rxbuffer)
+void sgdma_start_rxdma(struct altera_tse_private *priv)
+{
+ sgdma_async_read(priv);
+}
+
+void sgdma_add_rx_desc(struct altera_tse_private *priv,
+ struct tse_buffer *rxbuffer)
{
queue_rx(priv, rxbuffer);
- return sgdma_async_read(priv);
}
/* status is returned on upper 16 bits,
@@ -239,28 +254,52 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
unsigned int pktstatus = 0;
struct tse_buffer *rxbuffer = NULL;
- dma_sync_single_for_cpu(priv->device,
- priv->rxdescphys,
- priv->rxdescmem,
- DMA_BIDIRECTIONAL);
+ u32 sts = ioread32(&csr->status);
desc = &base[0];
- if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
- (desc->status & SGDMA_STATUS_EOP)) {
+ if (sts & SGDMA_STSREG_EOP) {
+ dma_sync_single_for_cpu(priv->device,
+ priv->rxdescphys,
+ priv->sgdmadesclen,
+ DMA_FROM_DEVICE);
+
pktlength = desc->bytes_xferred;
pktstatus = desc->status & 0x3f;
rxstatus = pktstatus;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
- desc->status = 0;
+ if (rxstatus) {
+ desc->status = 0;
- rxbuffer = dequeue_rx(priv);
- if (rxbuffer == NULL)
- netdev_err(priv->dev,
- "sgdma rx and rx queue empty!\n");
+ rxbuffer = dequeue_rx(priv);
+ if (rxbuffer == NULL)
+ netdev_info(priv->dev,
+ "sgdma rx and rx queue empty!\n");
+
+ /* Clear control */
+ iowrite32(0, &csr->control);
+ /* clear status */
+ iowrite32(0xf, &csr->status);
- /* kick the rx sgdma after reaping this descriptor */
+ /* kick the rx sgdma after reaping this descriptor */
+ pktsrx = sgdma_async_read(priv);
+
+ } else {
+ /* If the SGDMA indicated an end of packet on recv,
+ * then it's expected that the rxstatus from the
+ * descriptor is non-zero - meaning a valid packet
+ * with a nonzero length, or an error has been
+ * indicated. If not, then all we can do is signal
+ * an error and return no packet received. Most likely
+ * there is a system design error, or an error in the
+ * underlying kernel (cache or cache management problem)
+ */
+ netdev_err(priv->dev,
+ "SGDMA RX Error Info: %x, %x, %x\n",
+ sts, desc->status, rxstatus);
+ }
+ } else if (sts == 0) {
pktsrx = sgdma_async_read(priv);
}
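As the nearby comment says, sgdma_rx_status() returns the descriptor status in the upper 16 bits and the byte count in the lower 16, which is what the rewritten EOP branch above assembles from desc->status and desc->bytes_xferred. A one-function sketch of that packing; the helper name is illustrative:

#include <stdint.h>

/* status occupies bits 31:16, the transferred byte count bits 15:0 */
static inline uint32_t pack_rxstatus(uint16_t status, uint16_t length)
{
	return ((uint32_t)status << 16) | length;
}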
@@ -269,15 +308,15 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
/* Private functions */
-static void sgdma_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
- dma_addr_t ndesc_phys,
- dma_addr_t raddr,
- dma_addr_t waddr,
- u16 length,
- int generate_eop,
- int rfixed,
- int wfixed)
+static void sgdma_setup_descrip(struct sgdma_descrip *desc,
+ struct sgdma_descrip *ndesc,
+ dma_addr_t ndesc_phys,
+ dma_addr_t raddr,
+ dma_addr_t waddr,
+ u16 length,
+ int generate_eop,
+ int rfixed,
+ int wfixed)
{
/* Clear the next descriptor as not owned by hardware */
u32 ctrl = ndesc->control;
@@ -316,35 +355,29 @@ static int sgdma_async_read(struct altera_tse_private *priv)
struct sgdma_descrip *cdesc = &descbase[0];
struct sgdma_descrip *ndesc = &descbase[1];
- unsigned int sts = ioread32(&csr->status);
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
rxbuffer = queue_rx_peekhead(priv);
- if (rxbuffer == NULL)
+ if (rxbuffer == NULL) {
+ netdev_err(priv->dev, "no rx buffers available\n");
return 0;
-
- sgdma_descrip(cdesc, /* current descriptor */
- ndesc, /* next descriptor */
- sgdma_rxphysaddr(priv, ndesc),
- 0, /* read addr 0 for rx dma */
- rxbuffer->dma_addr, /* write addr for rx dma */
- 0, /* read 'til EOP */
- 0, /* EOP: NA for rx dma */
- 0, /* read fixed: NA for rx dma */
- 0); /* SOP: NA for rx DMA */
-
- /* clear control and status */
- iowrite32(0, &csr->control);
-
- /* If status available, clear those bits */
- if (sts & 0xf)
- iowrite32(0xf, &csr->status);
+ }
+
+ sgdma_setup_descrip(cdesc, /* current descriptor */
+ ndesc, /* next descriptor */
+ sgdma_rxphysaddr(priv, ndesc),
+ 0, /* read addr 0 for rx dma */
+ rxbuffer->dma_addr, /* write addr for rx dma */
+ 0, /* read 'til EOP */
+ 0, /* EOP: NA for rx dma */
+ 0, /* read fixed: NA for rx dma */
+ 0); /* SOP: NA for rx DMA */
dma_sync_single_for_device(priv->device,
priv->rxdescphys,
- priv->rxdescmem,
- DMA_BIDIRECTIONAL);
+ priv->sgdmadesclen,
+ DMA_TO_DEVICE);
iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
&csr->next_descrip);
@@ -371,7 +404,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
iowrite32(0x1f, &csr->status);
dma_sync_single_for_device(priv->device, priv->txdescphys,
- priv->txdescmem, DMA_TO_DEVICE);
+ priv->sgdmadesclen, DMA_TO_DEVICE);
iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
&csr->next_descrip);
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
index 07d471729dc4..584977e29ef9 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.h
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
void sgdma_clear_txirq(struct altera_tse_private *);
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
u32 sgdma_tx_completions(struct altera_tse_private *);
-int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
void sgdma_status(struct altera_tse_private *);
u32 sgdma_rx_status(struct altera_tse_private *);
int sgdma_initialize(struct altera_tse_private *);
void sgdma_uninitialize(struct altera_tse_private *);
+void sgdma_start_rxdma(struct altera_tse_private *);
#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 8feeed05de0e..465c4aabebbd 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -58,6 +58,8 @@
/* MAC function configuration default settings */
#define ALTERA_TSE_TX_IPG_LENGTH 12
+#define ALTERA_TSE_PAUSE_QUANTA 0xffff
+
#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
/* MAC Command_Config Register Bit Definitions
@@ -390,10 +392,11 @@ struct altera_dmaops {
void (*clear_rxirq)(struct altera_tse_private *);
int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
u32 (*tx_completions)(struct altera_tse_private *);
- int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+ void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
u32 (*get_rx_status)(struct altera_tse_private *);
int (*init_dma)(struct altera_tse_private *);
void (*uninit_dma)(struct altera_tse_private *);
+ void (*start_rxdma)(struct altera_tse_private *);
};
/* This structure is private to each device.
@@ -453,6 +456,7 @@ struct altera_tse_private {
u32 rxctrlreg;
dma_addr_t rxdescphys;
dma_addr_t txdescphys;
+ size_t sgdmadesclen;
struct list_head txlisthd;
struct list_head rxlisthd;
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 319ca74f5e74..76133caffa78 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
struct altera_tse_private *priv = netdev_priv(dev);
u32 rev = ioread32(&priv->mac_dev->megacore_revision);
- strcpy(info->driver, "Altera TSE MAC IP Driver");
+ strcpy(info->driver, "altera_tse");
strcpy(info->version, "v8.0");
snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
@@ -185,6 +185,12 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
* how to do any special formatting of this data.
* This version number will need to change if and
* when this register table is changed.
+ *
+ * version[31:0] = 1: Dump the first 128 TSE Registers
+ * Upper bits are all 0 by default
+ *
+ * Upper 16-bits will indicate feature presence for
+ * Ethtool register decoding in future version.
*/
regs->version = 1;
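The comment block above pins the low half of regs->version to the register-table layout (currently 1, the first 128 TSE registers) and reserves the upper 16 bits for future feature flags. Assuming that layout, a user-space decoder might split the word as in this sketch; both helper names are illustrative:

#include <stdint.h>

static inline uint16_t tse_regs_table_version(uint32_t version)
{
	return version & 0xffff;   /* 1 == dump of the first 128 TSE registers */
}

static inline uint16_t tse_regs_feature_bits(uint32_t version)
{
	return version >> 16;      /* reserved; all zero in this version */
}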
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index c70a29e0b9f7..e44a4aeb9701 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -224,6 +224,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
dev_kfree_skb_any(rxbuffer->skb);
return -EINVAL;
}
+ rxbuffer->dma_addr &= (dma_addr_t)~3;
rxbuffer->len = len;
return 0;
}
@@ -425,9 +426,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
priv->dev->stats.rx_bytes += pktlength;
entry = next_entry;
+
+ tse_rx_refill(priv);
}
- tse_rx_refill(priv);
return count;
}
@@ -520,7 +522,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
struct altera_tse_private *priv;
unsigned long int flags;
-
if (unlikely(!dev)) {
pr_err("%s: invalid dev pointer\n", __func__);
return IRQ_NONE;
@@ -868,13 +869,13 @@ static int init_mac(struct altera_tse_private *priv)
/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
* start address
*/
- tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+ tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
/* Set the MAC options */
cmd = ioread32(&mac->command_config);
- cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */
+ cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
* with CRC errors
@@ -882,8 +883,16 @@ static int init_mac(struct altera_tse_private *priv)
cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
cmd &= ~MAC_CMDCFG_TX_ENA;
cmd &= ~MAC_CMDCFG_RX_ENA;
+
+ /* Default speed and duplex setting, full/100 */
+ cmd &= ~MAC_CMDCFG_HD_ENA;
+ cmd &= ~MAC_CMDCFG_ETH_SPEED;
+ cmd &= ~MAC_CMDCFG_ENA_10;
+
iowrite32(cmd, &mac->command_config);
+ iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+
if (netif_msg_hw(priv))
dev_dbg(priv->device,
"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
@@ -1085,17 +1094,19 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- /* Start MAC Rx/Tx */
- spin_lock(&priv->mac_cfg_lock);
- tse_set_mac(priv, true);
- spin_unlock(&priv->mac_cfg_lock);
-
if (priv->phydev)
phy_start(priv->phydev);
napi_enable(&priv->napi);
netif_start_queue(dev);
+ priv->dmaops->start_rxdma(priv);
+
+ /* Start MAC Rx/Tx */
+ spin_lock(&priv->mac_cfg_lock);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+
return 0;
tx_request_irq_error:
@@ -1167,7 +1178,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
@@ -1235,7 +1245,7 @@ static int altera_tse_probe(struct platform_device *pdev)
/* Get the mapped address to the SGDMA descriptor memory */
ret = request_and_map(pdev, "s1", &dma_res, &descmap);
if (ret)
- goto out_free;
+ goto err_free_netdev;
/* Start of that memory is for transmit descriptors */
priv->tx_dma_desc = descmap;
@@ -1254,24 +1264,24 @@ static int altera_tse_probe(struct platform_device *pdev)
if (upper_32_bits(priv->rxdescmem_busaddr)) {
dev_dbg(priv->device,
"SGDMA bus addresses greater than 32-bits\n");
- goto out_free;
+ goto err_free_netdev;
}
if (upper_32_bits(priv->txdescmem_busaddr)) {
dev_dbg(priv->device,
"SGDMA bus addresses greater than 32-bits\n");
- goto out_free;
+ goto err_free_netdev;
}
} else if (priv->dmaops &&
priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
ret = request_and_map(pdev, "rx_resp", &dma_res,
&priv->rx_dma_resp);
if (ret)
- goto out_free;
+ goto err_free_netdev;
ret = request_and_map(pdev, "tx_desc", &dma_res,
&priv->tx_dma_desc);
if (ret)
- goto out_free;
+ goto err_free_netdev;
priv->txdescmem = resource_size(dma_res);
priv->txdescmem_busaddr = dma_res->start;
@@ -1279,13 +1289,13 @@ static int altera_tse_probe(struct platform_device *pdev)
ret = request_and_map(pdev, "rx_desc", &dma_res,
&priv->rx_dma_desc);
if (ret)
- goto out_free;
+ goto err_free_netdev;
priv->rxdescmem = resource_size(dma_res);
priv->rxdescmem_busaddr = dma_res->start;
} else {
- goto out_free;
+ goto err_free_netdev;
}
if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
@@ -1294,26 +1304,26 @@ static int altera_tse_probe(struct platform_device *pdev)
else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
else
- goto out_free;
+ goto err_free_netdev;
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
(void __iomem **)&priv->mac_dev);
if (ret)
- goto out_free;
+ goto err_free_netdev;
/* xSGDMA Rx Dispatcher address space */
ret = request_and_map(pdev, "rx_csr", &dma_res,
&priv->rx_dma_csr);
if (ret)
- goto out_free;
+ goto err_free_netdev;
/* xSGDMA Tx Dispatcher address space */
ret = request_and_map(pdev, "tx_csr", &dma_res,
&priv->tx_dma_csr);
if (ret)
- goto out_free;
+ goto err_free_netdev;
/* Rx IRQ */
@@ -1321,7 +1331,7 @@ static int altera_tse_probe(struct platform_device *pdev)
if (priv->rx_irq == -ENXIO) {
dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
ret = -ENXIO;
- goto out_free;
+ goto err_free_netdev;
}
/* Tx IRQ */
@@ -1329,7 +1339,7 @@ static int altera_tse_probe(struct platform_device *pdev)
if (priv->tx_irq == -ENXIO) {
dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
ret = -ENXIO;
- goto out_free;
+ goto err_free_netdev;
}
/* get FIFO depths from device tree */
@@ -1337,14 +1347,14 @@ static int altera_tse_probe(struct platform_device *pdev)
&priv->rx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
ret = -ENXIO;
- goto out_free;
+ goto err_free_netdev;
}
if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
&priv->rx_fifo_depth)) {
dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
ret = -ENXIO;
- goto out_free;
+ goto err_free_netdev;
}
/* get hash filter settings for this instance */
@@ -1393,7 +1403,7 @@ static int altera_tse_probe(struct platform_device *pdev)
((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
priv->phy_addr);
- goto out_free;
+ goto err_free_netdev;
}
/* Create/attach to MDIO bus */
@@ -1401,7 +1411,7 @@ static int altera_tse_probe(struct platform_device *pdev)
atomic_add_return(1, &instance_count));
if (ret)
- goto out_free;
+ goto err_free_netdev;
/* initialize netdev */
ether_setup(ndev);
@@ -1438,7 +1448,7 @@ static int altera_tse_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "failed to register TSE net device\n");
- goto out_free_mdio;
+ goto err_register_netdev;
}
platform_set_drvdata(pdev, ndev);
@@ -1455,13 +1465,16 @@ static int altera_tse_probe(struct platform_device *pdev)
ret = init_phy(ndev);
if (ret != 0) {
netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
- goto out_free_mdio;
+ goto err_init_phy;
}
return 0;
-out_free_mdio:
+err_init_phy:
+ unregister_netdev(ndev);
+err_register_netdev:
+ netif_napi_del(&priv->napi);
altera_tse_mdio_destroy(ndev);
-out_free:
+err_free_netdev:
free_netdev(ndev);
return ret;
}
@@ -1496,6 +1509,7 @@ struct altera_dmaops altera_dtype_sgdma = {
.get_rx_status = sgdma_rx_status,
.init_dma = sgdma_initialize,
.uninit_dma = sgdma_uninitialize,
+ .start_rxdma = sgdma_start_rxdma,
};
struct altera_dmaops altera_dtype_msgdma = {
@@ -1514,6 +1528,7 @@ struct altera_dmaops altera_dtype_msgdma = {
.get_rx_status = msgdma_rx_status,
.init_dma = msgdma_initialize,
.uninit_dma = msgdma_uninitialize,
+ .start_rxdma = msgdma_start_rxdma,
};
static struct of_device_id altera_tse_ids[] = {
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index 928fac6dd10a..53f85bf71526 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -11,6 +11,7 @@
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/clk.h>
/* STATUS and ENABLE Register bit masks */
#define TXINT_MASK (1<<0) /* Transmit interrupt */
@@ -131,6 +132,7 @@ struct arc_emac_priv {
struct mii_bus *bus;
void __iomem *regs;
+ struct clk *clk;
struct napi_struct napi;
struct net_device_stats stats;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 9f45782819ec..d647a7d115ac 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -649,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev)
return -ENODEV;
}
- /* Get CPU clock frequency from device tree */
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &clock_frequency)) {
- dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
- return -EINVAL;
- }
-
/* Get IRQ from device tree */
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!irq) {
@@ -683,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev)
priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
if (IS_ERR(priv->regs)) {
err = PTR_ERR(priv->regs);
- goto out;
+ goto out_netdev;
}
dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);
+ priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ if (IS_ERR(priv->clk)) {
+ /* Get CPU clock frequency from device tree */
+ if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &clock_frequency)) {
+ dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
+ err = -EINVAL;
+ goto out_netdev;
+ }
+ } else {
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ goto out_clkget;
+ }
+
+ clock_frequency = clk_get_rate(priv->clk);
+ }
+
id = arc_reg_get(priv, R_ID);
/* Check for EMAC revision 5 or 7, magic number */
if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
err = -ENODEV;
- goto out;
+ goto out_clken;
}
dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);
@@ -708,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev)
ndev->name, ndev);
if (err) {
dev_err(&pdev->dev, "could not allocate IRQ\n");
- goto out;
+ goto out_clken;
}
/* Get MAC address from device tree */
@@ -729,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev)
if (!priv->rxbd) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
- goto out;
+ goto out_clken;
}
priv->txbd = priv->rxbd + RX_BD_NUM;
@@ -741,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev)
err = arc_mdio_probe(pdev, priv);
if (err) {
dev_err(&pdev->dev, "failed to probe MII bus\n");
- goto out;
+ goto out_clken;
}
priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
@@ -749,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev)
if (!priv->phy_dev) {
dev_err(&pdev->dev, "of_phy_connect() failed\n");
err = -ENODEV;
- goto out;
+ goto out_mdio;
}
dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",
@@ -759,14 +771,25 @@ static int arc_emac_probe(struct platform_device *pdev)
err = register_netdev(ndev);
if (err) {
- netif_napi_del(&priv->napi);
dev_err(&pdev->dev, "failed to register network device\n");
- goto out;
+ goto out_netif_api;
}
return 0;
-out:
+out_netif_api:
+ netif_napi_del(&priv->napi);
+ phy_disconnect(priv->phy_dev);
+ priv->phy_dev = NULL;
+out_mdio:
+ arc_mdio_remove(priv);
+out_clken:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+out_clkget:
+ if (!IS_ERR(priv->clk))
+ clk_put(priv->clk);
+out_netdev:
free_netdev(ndev);
return err;
}
@@ -781,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev)
arc_mdio_remove(priv);
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
+
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ }
+
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index a78edaccceee..3b0d43154e67 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
#define BCM_5710_UNDI_FW_MF_MINOR (0x08)
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4))
+#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
+#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
{
u8 major, minor, version;
@@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Reset should be performed after BRB is emptied */
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
u32 timer_count = 1000;
+ bool need_write = true;
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
* cleaning methods - might be redundant but harmless.
*/
if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
- bnx2x_prev_unload_undi_mf(bp);
+ if (need_write) {
+ bnx2x_prev_unload_undi_mf(bp);
+ need_write = false;
+ }
} else if (prev_undi) {
/* If UNDI resides in memory,
* manually increment it
@@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
iounmap(bp->doorbells);
bnx2x_release_firmware(bp);
+ } else {
+ bnx2x_vf_pci_dealloc(bp);
}
bnx2x_free_mem_bp(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c523b32db70..81cc2d9831c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
(atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
vf_vlan_rules_cnt(vf))) {
- BNX2X_ERR("No credits for vlan\n");
+ BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+ atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+ vf_vlan_rules_cnt(vf));
return -ENOMEM;
}
@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
/* add new mcasts */
+ mcast.mcast_list_len = mc_num;
rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
if (rc)
BNX2X_ERR("Faled to add multicasts\n");
@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
return 0;
}
+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ int new)
+{
+ int num = vf_vlan_rules_cnt(vf);
+ int diff = new - num;
+ bool rc = true;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+ vf->abs_vfid, new, num);
+
+ if (diff > 0)
+ rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+ else if (diff < 0)
+ rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+ if (rc)
+ vf_vlan_rules_cnt(vf) = new;
+ else
+ DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+ vf->abs_vfid);
+}
+
/* must be called after the number of PF queues and the number of VFs are
* both known
*/
@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
resc->num_mac_filters = 1;
/* divvy up vlan rules */
+ bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
vlan_count = 1 << ilog2(vlan_count);
- resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+ bnx2x_iov_re_set_vlan_filters(bp, vf,
+ vlan_count / BNX2X_NR_VIRTFN(bp));
/* no real limitation */
resc->num_mc_filters = 0;
@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
bnx2x_iov_static_resc(bp, vf);
/* queues are initialized during VF-ACQUIRE */
-
- /* reserve the vf vlan credit */
- bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
-
vf->filter_state = 0;
vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ /* Save a vlan filter for the Hypervisor */
return ((req_resc->num_rxqs <= rxq_cnt) &&
(req_resc->num_txqs <= txq_cnt) &&
(req_resc->num_sbs <= vf_sb_count(vf)) &&
(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
- (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+ (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
}
/* CORE VF API */
@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
if (resc->num_mac_filters)
vf_mac_rules_cnt(vf) = resc->num_mac_filters;
- if (resc->num_vlan_filters)
- vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+ /* Add an additional vlan filter credit for the hypervisor */
+ bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
DP(BNX2X_MSG_IOV,
"Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
vf_sb_count(vf), vf_rxq_count(vf),
vf_txq_count(vf), vf_mac_rules_cnt(vf),
- vf_vlan_rules_cnt(vf));
+ vf_vlan_rules_visible_cnt(vf));
/* Initialize the queues */
if (!vf->vfqs) {
@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return bp->regview + PXP_VF_ADDR_DB_START;
}
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+}
+
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
mutex_init(&bp->vf2pf_mutex);
@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
return 0;
alloc_mem_err:
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
- sizeof(union pf_vf_bulletin));
+ bnx2x_vf_pci_dealloc(bp);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8bf764570eef..6929adba52f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -159,6 +159,8 @@ struct bnx2x_virtf {
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+ /* Hide a single vlan filter credit for the hypervisor */
+#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
u8 sb_count; /* actual number of SBs */
u8 igu_base_id; /* base igu status block id */
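vf_vlan_rules_visible_cnt() above deliberately reports one credit fewer than was allocated, so a vlan filter always stays with the hypervisor while the VF only sees the remainder. A trivial worked example of that split; the numbers are illustrative:

#include <stdio.h>

int main(void)
{
	int allocated = 8;               /* total vlan filter credits, incl. the hypervisor's */
	int visible   = allocated - 1;   /* what is advertised to the VF */

	printf("allocated=%d visible=%d\n", allocated, visible);
	return 0;
}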
@@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
void bnx2x_timer_sriov(struct bnx2x *bp);
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
+void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
@@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return NULL;
}
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0622884596b2..0c067e8564dd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_max_queue_cnt(bp, vf);
resc->num_sbs = vf_sb_count(vf);
resc->num_mac_filters = vf_mac_rules_cnt(vf);
- resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
resc->num_mc_filters = 0;
if (status == PFVF_STATUS_SUCCESS) {
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 7e49c43b7af3..9e089d24466e 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
config NET_CADENCE
bool "Cadence devices"
- depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
+ depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
config MACB
tristate "Cadence MACB/GEM support"
- depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
+ depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
select PHYLIB
---help---
The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca97005e24b4..e9daa072ebb4 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
{
unsigned int entry;
struct sk_buff *skb;
- struct macb_dma_desc *desc;
dma_addr_t paddr;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
- u32 addr, ctrl;
-
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
- desc = &bp->rx_ring[entry];
/* Make hw descriptor updates visible to CPU */
rmb();
- addr = desc->addr;
- ctrl = desc->ctrl;
bp->rx_prepared_head++;
- if ((addr & MACB_BIT(RX_USED)))
- continue;
-
if (bp->rx_skbuff[entry] == NULL) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
if (!(addr & MACB_BIT(RX_USED)))
break;
- desc->addr &= ~MACB_BIT(RX_USED);
bp->rx_tail++;
count++;
@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
- /*
- * We've done what we can to clean the buffers. Make sure we
- * get notified when new packets arrive.
- */
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
-
/* Packets received while interrupts were disabled */
status = macb_readl(bp, RSR);
- if (unlikely(status))
+ if (status) {
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
+ } else {
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
}
/* TODO: Handle errors */
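The macb_poll() completion change above re-enables receive interrupts only when RSR shows nothing pending; if packets arrived while interrupts were masked, it acks RCOMP (on ISR clear-on-write hardware) and reschedules the poll instead, closing the lost-event window. A hedged stand-alone sketch of that tail; every helper below is a stub, none is a macb function:

#include <stdint.h>

/* Stubs standing in for register access and the NAPI core. */
static uint32_t read_rx_status(void)      { return 0; }
static void     ack_rx_status(uint32_t s) { (void)s; }
static void     enable_rx_irq(void)       { }
static void     reschedule_poll(void)     { }

/* Only re-arm the interrupt when no packet slipped in while it was
 * masked; otherwise hand the work back to the next poll pass.
 */
static void poll_complete(void)
{
	uint32_t status = read_rx_status();

	if (status) {
		ack_rx_status(status);   /* clear-on-write ISR variants need the ack */
		reschedule_poll();
	} else {
		enable_rx_irq();
	}
}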
@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
schedule_work(&bp->tx_error_task);
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
break;
}
@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
}
if (status & MACB_BIT(HRESP)) {
@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* (work queue?)
*/
netdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ macb_writel(bp, ISR, MACB_BIT(HRESP));
}
status = macb_readl(bp, ISR);
@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
desc = &bp->rx_ring[i];
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
- dma_unmap_single(&bp->pdev->dev, addr, skb->len,
+ dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
skb = NULL;
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index d40c994a4f6a..570222c33410 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -67,13 +67,13 @@ config CHELSIO_T3
will be called cxgb3.
config CHELSIO_T4
- tristate "Chelsio Communications T4 Ethernet support"
+ tristate "Chelsio Communications T4/T5 Ethernet support"
depends on PCI
select FW_LOADER
select MDIO
---help---
- This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
- adapters.
+ This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+ adapters and T5 based 40Gb Ethernet adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
@@ -87,11 +87,12 @@ config CHELSIO_T4
will be called cxgb4.
config CHELSIO_T4VF
- tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+ tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
depends on PCI
---help---
- This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
- adapters with PCI-E SR-IOV Virtual Functions.
+ This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+ adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
+ Functions.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index bf5eb3310b0e..8efeed3325b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5877,6 +5877,8 @@ static void print_port_info(const struct net_device *dev)
spd = " 2.5 GT/s";
else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
spd = " 5 GT/s";
+ else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
+ spd = " 8 GT/s";
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
bufp += sprintf(bufp, "100/");
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
new file mode 100644
index 000000000000..4884205e56ee
--- /dev/null
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -0,0 +1,706 @@
+ /*
+ * drivers/net/ethernet/beckhoff/ec_bhf.c
+ *
+ * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* This is a driver for EtherCAT master module present on CCAT FPGA.
+ * Those can be found on Beckhoff CX50xx industrial PCs.
+ */
+
+#if 0
+#define DEBUG
+#endif
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/stat.h>
+
+#define TIMER_INTERVAL_NSEC 20000
+
+#define INFO_BLOCK_SIZE 0x10
+#define INFO_BLOCK_TYPE 0x0
+#define INFO_BLOCK_REV 0x2
+#define INFO_BLOCK_BLK_CNT 0x4
+#define INFO_BLOCK_TX_CHAN 0x4
+#define INFO_BLOCK_RX_CHAN 0x5
+#define INFO_BLOCK_OFFSET 0x8
+
+#define EC_MII_OFFSET 0x4
+#define EC_FIFO_OFFSET 0x8
+#define EC_MAC_OFFSET 0xc
+
+#define MAC_FRAME_ERR_CNT 0x0
+#define MAC_RX_ERR_CNT 0x1
+#define MAC_CRC_ERR_CNT 0x2
+#define MAC_LNK_LST_ERR_CNT 0x3
+#define MAC_TX_FRAME_CNT 0x10
+#define MAC_RX_FRAME_CNT 0x14
+#define MAC_TX_FIFO_LVL 0x20
+#define MAC_DROPPED_FRMS 0x28
+#define MAC_CONNECTED_CCAT_FLAG 0x78
+
+#define MII_MAC_ADDR 0x8
+#define MII_MAC_FILT_FLAG 0xe
+#define MII_LINK_STATUS 0xf
+
+#define FIFO_TX_REG 0x0
+#define FIFO_TX_RESET 0x8
+#define FIFO_RX_REG 0x10
+#define FIFO_RX_ADDR_VALID (1u << 31)
+#define FIFO_RX_RESET 0x18
+
+#define DMA_CHAN_OFFSET 0x1000
+#define DMA_CHAN_SIZE 0x8
+
+#define DMA_WINDOW_SIZE_MASK 0xfffffffc
+
+static struct pci_device_id ids[] = {
+ { PCI_DEVICE(0x15ec, 0x5000), },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, ids);
+
+struct rx_header {
+#define RXHDR_NEXT_ADDR_MASK 0xffffffu
+#define RXHDR_NEXT_VALID (1u << 31)
+ __le32 next;
+#define RXHDR_NEXT_RECV_FLAG 0x1
+ __le32 recv;
+#define RXHDR_LEN_MASK 0xfffu
+ __le16 len;
+ __le16 port;
+ __le32 reserved;
+ u8 timestamp[8];
+} __packed;
+
+#define PKT_PAYLOAD_SIZE 0x7e8
+struct rx_desc {
+ struct rx_header header;
+ u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+struct tx_header {
+ __le16 len;
+#define TX_HDR_PORT_0 0x1
+#define TX_HDR_PORT_1 0x2
+ u8 port;
+ u8 ts_enable;
+#define TX_HDR_SENT 0x1
+ __le32 sent;
+ u8 timestamp[8];
+} __packed;
+
+struct tx_desc {
+ struct tx_header header;
+ u8 data[PKT_PAYLOAD_SIZE];
+} __packed;
+
+#define FIFO_SIZE 64
+
+static long polling_frequency = TIMER_INTERVAL_NSEC;
+
+struct bhf_dma {
+ u8 *buf;
+ size_t len;
+ dma_addr_t buf_phys;
+
+ u8 *alloc;
+ size_t alloc_len;
+ dma_addr_t alloc_phys;
+};
+
+struct ec_bhf_priv {
+ struct net_device *net_dev;
+
+ struct pci_dev *dev;
+
+ void * __iomem io;
+ void * __iomem dma_io;
+
+ struct hrtimer hrtimer;
+
+ int tx_dma_chan;
+ int rx_dma_chan;
+ void * __iomem ec_io;
+ void * __iomem fifo_io;
+ void * __iomem mii_io;
+ void * __iomem mac_io;
+
+ struct bhf_dma rx_buf;
+ struct rx_desc *rx_descs;
+ int rx_dnext;
+ int rx_dcount;
+
+ struct bhf_dma tx_buf;
+ struct tx_desc *tx_descs;
+ int tx_dcount;
+ int tx_dnext;
+
+ u64 stat_rx_bytes;
+ u64 stat_tx_bytes;
+};
+
+#define PRIV_TO_DEV(priv) (&(priv)->dev->dev)
+
+#define ETHERCAT_MASTER_ID 0x14
+
+static void ec_bhf_print_status(struct ec_bhf_priv *priv)
+{
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ dev_dbg(dev, "Frame error counter: %d\n",
+ ioread8(priv->mac_io + MAC_FRAME_ERR_CNT));
+ dev_dbg(dev, "RX error counter: %d\n",
+ ioread8(priv->mac_io + MAC_RX_ERR_CNT));
+ dev_dbg(dev, "CRC error counter: %d\n",
+ ioread8(priv->mac_io + MAC_CRC_ERR_CNT));
+ dev_dbg(dev, "TX frame counter: %d\n",
+ ioread32(priv->mac_io + MAC_TX_FRAME_CNT));
+ dev_dbg(dev, "RX frame counter: %d\n",
+ ioread32(priv->mac_io + MAC_RX_FRAME_CNT));
+ dev_dbg(dev, "TX fifo level: %d\n",
+ ioread8(priv->mac_io + MAC_TX_FIFO_LVL));
+ dev_dbg(dev, "Dropped frames: %d\n",
+ ioread8(priv->mac_io + MAC_DROPPED_FRMS));
+ dev_dbg(dev, "Connected with CCAT slot: %d\n",
+ ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG));
+ dev_dbg(dev, "Link status: %d\n",
+ ioread8(priv->mii_io + MII_LINK_STATUS));
+}
+
+static void ec_bhf_reset(struct ec_bhf_priv *priv)
+{
+ iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT);
+ iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT);
+ iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT);
+ iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT);
+ iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS);
+
+ iowrite8(0, priv->fifo_io + FIFO_TX_RESET);
+ iowrite8(0, priv->fifo_io + FIFO_RX_RESET);
+
+ iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL);
+}
+
+static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc)
+{
+ u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header);
+ u32 addr = (u8 *)desc - priv->tx_buf.buf;
+
+ iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG);
+
+ dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n");
+}
+
+static int ec_bhf_desc_sent(struct tx_desc *desc)
+{
+ return le32_to_cpu(desc->header.sent) & TX_HDR_SENT;
+}
+
+static void ec_bhf_process_tx(struct ec_bhf_priv *priv)
+{
+ if (unlikely(netif_queue_stopped(priv->net_dev))) {
+ /* Make sure that we perceive changes to tx_dnext. */
+ smp_rmb();
+
+ if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext]))
+ netif_wake_queue(priv->net_dev);
+ }
+}
+
+static int ec_bhf_pkt_received(struct rx_desc *desc)
+{
+ return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG;
+}
+
+static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc)
+{
+ iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf),
+ priv->fifo_io + FIFO_RX_REG);
+}
+
+static void ec_bhf_process_rx(struct ec_bhf_priv *priv)
+{
+ struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext];
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ while (ec_bhf_pkt_received(desc)) {
+ int pkt_size = (le16_to_cpu(desc->header.len) &
+ RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4;
+ u8 *data = desc->data;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size);
+ dev_dbg(dev, "Received packet, size: %d\n", pkt_size);
+
+ if (skb) {
+ memcpy(skb_put(skb, pkt_size), data, pkt_size);
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ dev_dbg(dev, "Protocol type: %x\n", skb->protocol);
+
+ priv->stat_rx_bytes += pkt_size;
+
+ netif_rx(skb);
+ } else {
+ dev_err_ratelimited(dev,
+ "Couldn't allocate a skb_buff for a packet of size %u\n",
+ pkt_size);
+ }
+
+ desc->header.recv = 0;
+
+ ec_bhf_add_rx_desc(priv, desc);
+
+ priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount;
+ desc = &priv->rx_descs[priv->rx_dnext];
+ }
+}
+
+static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer)
+{
+ struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv,
+ hrtimer);
+ ec_bhf_process_rx(priv);
+ ec_bhf_process_tx(priv);
+
+ if (!netif_running(priv->net_dev))
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward_now(timer, ktime_set(0, polling_frequency));
+ return HRTIMER_RESTART;
+}
+
+static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
+{
+ struct device *dev = PRIV_TO_DEV(priv);
+ unsigned block_count, i;
+ void __iomem *ec_info;
+
+ dev_dbg(dev, "Info block:\n");
+ dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
+ dev_dbg(dev, "Revision of function: %x\n",
+ (unsigned)ioread16(priv->io + INFO_BLOCK_REV));
+
+ block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT);
+ dev_dbg(dev, "Number of function blocks: %x\n", block_count);
+
+ for (i = 0; i < block_count; i++) {
+ u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE +
+ INFO_BLOCK_TYPE);
+ if (type == ETHERCAT_MASTER_ID)
+ break;
+ }
+ if (i == block_count) {
+ dev_err(dev, "EtherCAT master with DMA block not found\n");
+ return -ENODEV;
+ }
+ dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i);
+
+ ec_info = priv->io + i * INFO_BLOCK_SIZE;
+ dev_dbg(dev, "EtherCAT master revision: %d\n",
+ ioread16(ec_info + INFO_BLOCK_REV));
+
+ priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN);
+ dev_dbg(dev, "EtherCAT master tx dma channel: %d\n",
+ priv->tx_dma_chan);
+
+ priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN);
+ dev_dbg(dev, "EtherCAT master rx dma channel: %d\n",
+ priv->rx_dma_chan);
+
+ priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET);
+ priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET);
+ priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET);
+ priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET);
+
+ dev_dbg(dev,
+ "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n",
+ priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io);
+
+ return 0;
+}
+
+static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct tx_desc *desc;
+ unsigned len;
+
+ dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n");
+
+ desc = &priv->tx_descs[priv->tx_dnext];
+
+ skb_copy_and_csum_dev(skb, desc->data);
+ len = skb->len;
+
+ memset(&desc->header, 0, sizeof(desc->header));
+ desc->header.len = cpu_to_le16(len);
+ desc->header.port = TX_HDR_PORT_0;
+
+ ec_bhf_send_packet(priv, desc);
+
+ priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount;
+
+ if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) {
+ /* Make sure that the update to tx_dnext is perceived by the
+ * timer routine; pairs with the smp_rmb() in
+ * ec_bhf_process_tx().
+ */
+ smp_wmb();
+
+ netif_stop_queue(net_dev);
+
+ dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n");
+ ec_bhf_print_status(priv);
+ }
+
+ priv->stat_tx_bytes += len;
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv,
+ struct bhf_dma *buf,
+ int channel,
+ int size)
+{
+ int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET;
+ struct device *dev = PRIV_TO_DEV(priv);
+ u32 mask;
+
+ iowrite32(0xffffffff, priv->dma_io + offset);
+
+ mask = ioread32(priv->dma_io + offset);
+ mask &= DMA_WINDOW_SIZE_MASK;
+ dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel);
+
+ /* We want to carve out a chunk of memory that is:
+ * - aligned to the mask we just read,
+ * - at most ~mask + 1 bytes long (the size of the DMA window).
+ * To guarantee both, we allocate twice the window size and pick
+ * a properly aligned window inside that allocation.
+ */
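+ /* Worked example with a hypothetical mask of 0xfffff000: the window
+ * is ~mask + 1 = 4096 bytes, so 8192 bytes are allocated and
+ * (alloc_phys + 4096) & mask yields a 4096-byte aligned window that
+ * is guaranteed to lie inside the allocation.
+ */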
+ buf->len = min_t(int, ~mask + 1, size);
+ buf->alloc_len = 2 * buf->len;
+
+ dev_dbg(dev, "Allocating %d bytes for channel %d",
+ (int)buf->alloc_len, channel);
+ buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys,
+ GFP_KERNEL);
+ if (buf->alloc == NULL) {
+ dev_info(dev, "Failed to allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ buf->buf_phys = (buf->alloc_phys + buf->len) & mask;
+ buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys);
+
+ iowrite32(0, priv->dma_io + offset + 4);
+ iowrite32(buf->buf_phys, priv->dma_io + offset);
+ dev_dbg(dev, "Buffer: %x and read from dev: %x",
+ (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset));
+
+ return 0;
+}
+
+static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv)
+{
+ int i = 0;
+
+ priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc);
+ priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf;
+ priv->tx_dnext = 0;
+
+ for (i = 0; i < priv->tx_dcount; i++)
+ priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT);
+}
+
+static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv)
+{
+ int i;
+
+ priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc);
+ priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf;
+ priv->rx_dnext = 0;
+
+ for (i = 0; i < priv->rx_dcount; i++) {
+ struct rx_desc *desc = &priv->rx_descs[i];
+ u32 next;
+
+ if (i != priv->rx_dcount - 1)
+ next = (u8 *)(desc + 1) - priv->rx_buf.buf;
+ else
+ next = 0;
+ next |= RXHDR_NEXT_VALID;
+ desc->header.next = cpu_to_le32(next);
+ desc->header.recv = 0;
+ ec_bhf_add_rx_desc(priv, desc);
+ }
+}
+
+static int ec_bhf_open(struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct device *dev = PRIV_TO_DEV(priv);
+ int err = 0;
+
+ dev_info(dev, "Opening device\n");
+
+ ec_bhf_reset(priv);
+
+ err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan,
+ FIFO_SIZE * sizeof(struct rx_desc));
+ if (err) {
+ dev_err(dev, "Failed to allocate rx buffer\n");
+ goto out;
+ }
+ ec_bhf_setup_rx_descs(priv);
+
+ dev_info(dev, "RX buffer allocated, address: %x\n",
+ (unsigned)priv->rx_buf.buf_phys);
+
+ err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan,
+ FIFO_SIZE * sizeof(struct tx_desc));
+ if (err) {
+ dev_err(dev, "Failed to allocate tx buffer\n");
+ goto error_rx_free;
+ }
+ dev_dbg(dev, "TX buffer allocated, addres: %x\n",
+ (unsigned)priv->tx_buf.buf_phys);
+
+ iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG);
+
+ ec_bhf_setup_tx_descs(priv);
+
+ netif_start_queue(net_dev);
+
+ hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->hrtimer.function = ec_bhf_timer_fun;
+ hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency),
+ HRTIMER_MODE_REL);
+
+ dev_info(PRIV_TO_DEV(priv), "Device open\n");
+
+ ec_bhf_print_status(priv);
+
+ return 0;
+
+error_rx_free:
+ dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc,
+ priv->rx_buf.alloc_phys);
+out:
+ return err;
+}
+
+static int ec_bhf_stop(struct net_device *net_dev)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+ struct device *dev = PRIV_TO_DEV(priv);
+
+ hrtimer_cancel(&priv->hrtimer);
+
+ ec_bhf_reset(priv);
+
+ netif_tx_disable(net_dev);
+
+ dma_free_coherent(dev, priv->tx_buf.alloc_len,
+ priv->tx_buf.alloc, priv->tx_buf.alloc_phys);
+ dma_free_coherent(dev, priv->rx_buf.alloc_len,
+ priv->rx_buf.alloc, priv->rx_buf.alloc_phys);
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *
+ec_bhf_get_stats(struct net_device *net_dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) +
+ ioread8(priv->mac_io + MAC_CRC_ERR_CNT) +
+ ioread8(priv->mac_io + MAC_FRAME_ERR_CNT);
+ stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT);
+ stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT);
+ stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS);
+
+ stats->tx_bytes = priv->stat_tx_bytes;
+ stats->rx_bytes = priv->stat_rx_bytes;
+
+ return stats;
+}
+
+static const struct net_device_ops ec_bhf_netdev_ops = {
+ .ndo_start_xmit = ec_bhf_start_xmit,
+ .ndo_open = ec_bhf_open,
+ .ndo_stop = ec_bhf_stop,
+ .ndo_get_stats64 = ec_bhf_get_stats,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr
+};
+
+static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct net_device *net_dev;
+ struct ec_bhf_priv *priv;
+ void __iomem *dma_io;
+ void __iomem *io;
+ int err = 0;
+
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+
+ pci_set_master(dev);
+
+ err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->dev,
+ "Required dma mask not supported, failed to initialize device\n");
+ err = -EIO;
+ goto err_disable_dev;
+ }
+
+ err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&dev->dev,
+ "Required dma mask not supported, failed to initialize device\n");
+ goto err_disable_dev;
+ }
+
+ err = pci_request_regions(dev, "ec_bhf");
+ if (err) {
+ dev_err(&dev->dev, "Failed to request pci memory regions\n");
+ goto err_disable_dev;
+ }
+
+ io = pci_iomap(dev, 0, 0);
+ if (!io) {
+ dev_err(&dev->dev, "Failed to map pci card memory bar 0");
+ err = -EIO;
+ goto err_release_regions;
+ }
+
+ dma_io = pci_iomap(dev, 2, 0);
+ if (!dma_io) {
+ dev_err(&dev->dev, "Failed to map pci card memory bar 2");
+ err = -EIO;
+ goto err_unmap;
+ }
+
+ net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
+ if (!net_dev) {
+ err = -ENOMEM;
+ goto err_unmap_dma_io;
+ }
+
+ pci_set_drvdata(dev, net_dev);
+ SET_NETDEV_DEV(net_dev, &dev->dev);
+
+ net_dev->features = 0;
+ net_dev->flags |= IFF_NOARP;
+
+ net_dev->netdev_ops = &ec_bhf_netdev_ops;
+
+ priv = netdev_priv(net_dev);
+ priv->net_dev = net_dev;
+ priv->io = io;
+ priv->dma_io = dma_io;
+ priv->dev = dev;
+
+ err = ec_bhf_setup_offsets(priv);
+ if (err < 0)
+ goto err_free_net_dev;
+
+ memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+
+ dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n",
+ net_dev->dev_addr);
+
+ err = register_netdev(net_dev);
+ if (err < 0)
+ goto err_free_net_dev;
+
+ return 0;
+
+err_free_net_dev:
+ free_netdev(net_dev);
+err_unmap_dma_io:
+ pci_iounmap(dev, dma_io);
+err_unmap:
+ pci_iounmap(dev, io);
+err_release_regions:
+ pci_release_regions(dev);
+err_disable_dev:
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+
+ return err;
+}
+
+static void ec_bhf_remove(struct pci_dev *dev)
+{
+ struct net_device *net_dev = pci_get_drvdata(dev);
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ unregister_netdev(net_dev);
+ free_netdev(net_dev);
+
+ pci_iounmap(dev, priv->dma_io);
+ pci_iounmap(dev, priv->io);
+ pci_release_regions(dev);
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+}
+
+static struct pci_driver pci_driver = {
+ .name = "ec_bhf",
+ .id_table = ids,
+ .probe = ec_bhf_probe,
+ .remove = ec_bhf_remove,
+};
+
+static int __init ec_bhf_init(void)
+{
+ return pci_register_driver(&pci_driver);
+}
+
+static void __exit ec_bhf_exit(void)
+{
+ pci_unregister_driver(&pci_driver);
+}
+
+module_init(ec_bhf_init);
+module_exit(ec_bhf_exit);
+
+module_param(polling_frequency, long, S_IRUGO);
+MODULE_PARM_DESC(polling_frequency, "Polling timer interval in ns");
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 9125d9abf099..e2d42475b006 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
+static noinline void gfar_update_link_state(struct gfar_private *priv);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
@@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
return IRQ_HANDLED;
}
-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
-{
- struct phy_device *phydev = priv->phydev;
- u32 val = 0;
-
- if (!phydev->duplex)
- return val;
-
- if (!priv->pause_aneg_en) {
- if (priv->tx_pause_en)
- val |= MACCFG1_TX_FLOW;
- if (priv->rx_pause_en)
- val |= MACCFG1_RX_FLOW;
- } else {
- u16 lcl_adv, rmt_adv;
- u8 flowctrl;
- /* get link partner capabilities */
- rmt_adv = 0;
- if (phydev->pause)
- rmt_adv = LPA_PAUSE_CAP;
- if (phydev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
-
- lcl_adv = mii_advertise_flowctrl(phydev->advertising);
-
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
- if (flowctrl & FLOW_CTRL_TX)
- val |= MACCFG1_TX_FLOW;
- if (flowctrl & FLOW_CTRL_RX)
- val |= MACCFG1_RX_FLOW;
- }
-
- return val;
-}
-
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the phydev structure, and this
@@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
struct phy_device *phydev = priv->phydev;
- int new_state = 0;
- if (test_bit(GFAR_RESETTING, &priv->state))
- return;
-
- if (phydev->link) {
- u32 tempval1 = gfar_read(&regs->maccfg1);
- u32 tempval = gfar_read(&regs->maccfg2);
- u32 ecntrl = gfar_read(&regs->ecntrl);
-
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode.
- */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- tempval &= ~(MACCFG2_FULL_DUPLEX);
- else
- tempval |= MACCFG2_FULL_DUPLEX;
-
- priv->oldduplex = phydev->duplex;
- }
-
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- tempval =
- ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
- ecntrl &= ~(ECNTRL_R100);
- break;
- case 100:
- case 10:
- tempval =
- ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
- /* Reduced mode distinguishes
- * between 10 and 100
- */
- if (phydev->speed == SPEED_100)
- ecntrl |= ECNTRL_R100;
- else
- ecntrl &= ~(ECNTRL_R100);
- break;
- default:
- netif_warn(priv, link, dev,
- "Ack! Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
-
- priv->oldspeed = phydev->speed;
- }
-
- tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
- tempval1 |= gfar_get_flowctrl_cfg(priv);
-
- gfar_write(&regs->maccfg1, tempval1);
- gfar_write(&regs->maccfg2, tempval);
- gfar_write(&regs->ecntrl, ecntrl);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
+ if (unlikely(phydev->link != priv->oldlink ||
+ phydev->duplex != priv->oldduplex ||
+ phydev->speed != priv->oldspeed))
+ gfar_update_link_state(priv);
}
/* Update the hash table based on the current list of multicast
@@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+ u32 val = 0;
+
+ if (!phydev->duplex)
+ return val;
+
+ if (!priv->pause_aneg_en) {
+ if (priv->tx_pause_en)
+ val |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ val |= MACCFG1_RX_FLOW;
+ } else {
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_TX)
+ val |= MACCFG1_TX_FLOW;
+ if (flowctrl & FLOW_CTRL_RX)
+ val |= MACCFG1_RX_FLOW;
+ }
+
+ return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ struct phy_device *phydev = priv->phydev;
+
+ if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+ return;
+
+ if (phydev->link) {
+ u32 tempval1 = gfar_read(&regs->maccfg1);
+ u32 tempval = gfar_read(&regs->maccfg2);
+ u32 ecntrl = gfar_read(&regs->ecntrl);
+
+ if (phydev->duplex != priv->oldduplex) {
+ if (!(phydev->duplex))
+ tempval &= ~(MACCFG2_FULL_DUPLEX);
+ else
+ tempval |= MACCFG2_FULL_DUPLEX;
+
+ priv->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != priv->oldspeed) {
+ switch (phydev->speed) {
+ case 1000:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ case 100:
+ case 10:
+ tempval =
+ ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+ /* Reduced mode distinguishes
+ * between 10 and 100
+ */
+ if (phydev->speed == SPEED_100)
+ ecntrl |= ECNTRL_R100;
+ else
+ ecntrl &= ~(ECNTRL_R100);
+ break;
+ default:
+ netif_warn(priv, link, priv->ndev,
+ "Ack! Speed (%d) is not 10/100/1000!\n",
+ phydev->speed);
+ break;
+ }
+
+ priv->oldspeed = phydev->speed;
+ }
+
+ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+ gfar_write(&regs->maccfg1, tempval1);
+ gfar_write(&regs->maccfg2, tempval);
+ gfar_write(&regs->ecntrl, ecntrl);
+
+ if (!priv->oldlink)
+ priv->oldlink = 1;
+
+ } else if (priv->oldlink) {
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (netif_msg_link(priv))
+ phy_print_status(phydev);
+}
+
static struct of_device_id gfar_match[] =
{
{
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 891dbee6e6c1..76d70708f864 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 oldadv, newadv;
+ if (!phydev)
+ return -ENODEV;
+
if (!(phydev->supported & SUPPORTED_Pause) ||
(!(phydev->supported & SUPPORTED_Asym_Pause) &&
(epause->rx_pause != epause->tx_pause)))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index a2901139b209..5f5539561661 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
u16 phy_reg = 0;
u32 phy_id = 0;
- s32 ret_val;
+ s32 ret_val = 0;
u16 retry_count;
u32 mac_reg = 0;
@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
- hw->phy.ops.release(hw);
- ret_val = e1000_set_mdio_slow_mode_hv(hw);
- if (!ret_val)
- ret_val = e1000e_get_phy_id(hw);
- hw->phy.ops.acquire(hw);
+ if (hw->mac.type < e1000_pch_lpt) {
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+ }
if (ret_val)
return false;
@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
}
}
+ if (hw->phy.type == e1000_phy_82579) {
+ ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ &data);
+ if (ret_val)
+ goto release;
+
+ data &= ~I82579_LPI_100_PLL_SHUT;
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+ data);
+ }
+
/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
if (ret_val)
@@ -1314,15 +1327,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+ /* When connected at 10Mbps half-duplex, some parts are excessively
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if ((hw->mac.type == e1000_pch2lan) && link) {
+ if (((hw->mac.type == e1000_pch2lan) ||
+ (hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ u16 emi_addr;
+
reg = er32(TIPG);
reg &= ~E1000_TIPG_IPGT_MASK;
reg |= 0xFF;
@@ -1333,8 +1349,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val =
- e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+ if (hw->mac.type == e1000_pch2lan)
+ emi_addr = I82579_RX_CONFIG;
+ else
+ emi_addr = I217_RX_CONFIG;
+
+ ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);
hw->phy.ops.release(hw);
@@ -2494,51 +2514,44 @@ release:
* e1000_k1_gig_workaround_lv - K1 Si workaround
* @hw: pointer to the HW structure
*
- * Workaround to set the K1 beacon duration for 82579 parts
+ * Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
+ * Disable K1 at 1000Mbps and 100Mbps.
**/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
s32 ret_val = 0;
u16 status_reg = 0;
- u32 mac_reg;
- u16 phy_reg;
if (hw->mac.type != e1000_pch2lan)
return 0;
- /* Set K1 beacon duration based on 1Gbps speed or otherwise */
+ /* Set K1 beacon duration based on 10Mbps speed */
ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
if (ret_val)
return ret_val;
if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
== (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
- mac_reg = er32(FEXTNVM4);
- mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
- if (ret_val)
- return ret_val;
-
- if (status_reg & HV_M_STATUS_SPEED_1000) {
+ if (status_reg &
+ (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
u16 pm_phy_reg;
- mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
- phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
- /* LV 1G Packet drop issue wa */
+ /* LV 1G/100 Packet drop issue wa */
ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
if (ret_val)
return ret_val;
- pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+ pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
if (ret_val)
return ret_val;
} else {
+ u32 mac_reg;
+
+ mac_reg = er32(FEXTNVM4);
+ mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
- phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+ ew32(FEXTNVM4, mac_reg);
}
- ew32(FEXTNVM4, mac_reg);
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
}
return ret_val;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index bead50f9187b..5515126c81c1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -232,16 +232,19 @@
#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */
#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */
#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG 0xB20C /* Receive configuration */
#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */
#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 3841bccf058c..537d2780b408 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
#define HV_M_STATUS_SPEED_MASK 0x0300
#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_SPEED_100 0x0100
#define HV_M_STATUS_LINK_UP 0x0040
#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 109052a19bd9..e399f9b70777 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2899,12 +2899,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
- ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+ icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
i40e_ptp_tx_hwtstamp(pf);
- prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
}
-
- wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
}
/* If a critical error is pending we have no choice but to reset the
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b0c6050479eb..6e664d9038d6 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme,
return idx;
}
-static void
+static int
jme_fill_tx_map(struct pci_dev *pdev,
struct txdesc *txdesc,
struct jme_buffer_info *txbi,
@@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev,
len,
PCI_DMA_TODEVICE);
+ if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+ return -EINVAL;
+
pci_dma_sync_single_for_device(pdev,
dmaaddr,
len,
@@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev,
txbi->mapping = dmaaddr;
txbi->len = len;
+ return 0;
}
-static void
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx)
+{
+ struct jme_ring *txring = &(jme->txring[0]);
+ struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
+ int mask = jme->tx_ring_mask;
+ int j;
+
+ for (j = startidx ; j < endidx ; ++j) {
+ ctxbi = txbi + ((j + 2) & (mask));
+ pci_unmap_page(jme->pdev,
+ ctxbi->mapping,
+ ctxbi->len,
+ PCI_DMA_TODEVICE);
+
+ ctxbi->mapping = 0;
+ ctxbi->len = 0;
+ }
+}
+
+static int
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
struct jme_ring *txring = &(jme->txring[0]);
@@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
u32 len;
+ int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
frag = &skb_shinfo(skb)->frags[i];
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
skb_frag_page(frag),
frag->page_offset, skb_frag_size(frag), hidma);
+ if (ret) {
+ jme_drop_tx_map(jme, idx, idx+i);
+ goto out;
+ }
+
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
ctxdesc = txdesc + ((idx + 1) & (mask));
ctxbi = txbi + ((idx + 1) & (mask));
- jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
+ ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
offset_in_page(skb->data), len, hidma);
+ if (ret)
+ jme_drop_tx_map(jme, idx, idx+i);
+
+out:
+ return ret;
}
+
static int
jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
@@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct txdesc *txdesc;
struct jme_buffer_info *txbi;
u8 flags;
+ int ret = 0;
txdesc = (struct txdesc *)txring->desc + idx;
txbi = txring->bufinf + idx;
@@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
jme_tx_csum(jme, skb, &flags);
jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
- jme_map_tx_skb(jme, skb, idx);
+ ret = jme_map_tx_skb(jme, skb, idx);
+ if (ret)
+ return ret;
+
txdesc->desc1.flags = flags;
/*
* Set tx buffer info after telling NIC to send
@@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- jme_fill_tx_desc(jme, skb, idx);
+ if (jme_fill_tx_desc(jme, skb, idx))
+ return NETDEV_TX_BUSY;
jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index b161a525fc5b..9d5ced263a5e 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
clk_prepare_enable(dev->clk);
dev->err_interrupt = platform_get_irq(pdev, 0);
- if (dev->err_interrupt != -ENXIO) {
+ if (dev->err_interrupt > 0) {
ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
orion_mdio_err_irq,
IRQF_SHARED, pdev->name, dev);
@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)
writel(MVMDIO_ERR_INT_SMI_DONE,
dev->regs + MVMDIO_ERR_INT_MASK);
+
+ } else if (dev->err_interrupt == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
}
mutex_init(&dev->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index df2c1fbf75ec..5db97a4fdc01 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -746,10 +746,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
has_eth_port = true;
}
- if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
- request_module_nowait(IB_DRV_NAME);
if (has_eth_port)
request_module_nowait(EN_DRV_NAME);
+ if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+ request_module_nowait(IB_DRV_NAME);
}
/*
@@ -2406,7 +2406,8 @@ slave_start:
* No return code for this call, just warn the user in case of PCI
* express device capabilities are under-satisfied by the bus.
*/
- mlx4_check_pcie_caps(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_check_pcie_caps(dev);
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1f6d29183f1d..376f2f1d445e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1104,6 +1104,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
}
if (found_ix >= 0) {
+ /* Calculate a slave_gid which is the slave number in the gid
+ * table and not a globally unique slave number.
+ */
if (found_ix < MLX4_ROCE_PF_GIDS)
slave_gid = 0;
else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
@@ -1116,41 +1119,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
(vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
+ /* Calculate the globally unique slave id */
if (slave_gid) {
struct mlx4_active_ports exclusive_ports;
struct mlx4_active_ports actv_ports;
struct mlx4_slaves_pport slaves_pport_actv;
unsigned max_port_p_one;
- int num_slaves_before = 1;
+ int num_vfs_before = 0;
+ int candidate_slave_gid;
+ /* Calculate how many VFs are on the previous ports, if any */
for (i = 1; i < port; i++) {
bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
- set_bit(i, exclusive_ports.ports);
+ set_bit(i - 1, exclusive_ports.ports);
slaves_pport_actv =
mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
- num_slaves_before += bitmap_weight(
+ num_vfs_before += bitmap_weight(
slaves_pport_actv.slaves,
dev->num_vfs + 1);
}
- if (slave_gid < num_slaves_before) {
- bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
- set_bit(port - 1, exclusive_ports.ports);
- slaves_pport_actv =
- mlx4_phys_to_slaves_pport_actv(
- dev, &exclusive_ports);
- slave_gid += bitmap_weight(
- slaves_pport_actv.slaves,
- dev->num_vfs + 1) -
- num_slaves_before;
- }
- actv_ports = mlx4_get_active_ports(dev, slave_gid);
+ /* candidate_slave_gid isn't necessarily the correct slave, but
+ * it has the same number of ports and is assigned to the same
+ * ports as the real slave we're looking for. On dual port VF,
+ * slave_gid = [single port VFs on port <port>] +
+ * [offset of the current slave from the first dual port VF] +
+ * 1 (for the PF).
+ */
+ candidate_slave_gid = slave_gid + num_vfs_before;
+
+ actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
max_port_p_one = find_first_bit(
actv_ports.ports, dev->caps.num_ports) +
bitmap_weight(actv_ports.ports,
dev->caps.num_ports) + 1;
+ /* Calculate the real slave number */
for (i = 1; i < max_port_p_one; i++) {
if (i == port)
continue;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 12fa515a7dd8..a95df9d2645d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
}
+static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
+ u8 *gid, enum mlx4_protocol prot)
+{
+ int real_port;
+
+ if (prot != MLX4_PROT_ETH)
+ return 0;
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
+ dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
+ if (real_port < 0)
+ return -EINVAL;
+ gid[5] = real_port;
+ }
+
+ return 0;
+}
+
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
if (err)
goto ex_detach;
} else {
+ err = mlx4_adjust_port(dev, slave, gid, prot);
+ if (err)
+ goto ex_put;
+
err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
if (err)
goto ex_put;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 6cb24dc864ba..6e7527e2b595 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1722,22 +1722,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
- struct net_device *netdev)
-{
- int err;
-
- netdev->num_tx_queues = adapter->drv_tx_rings;
- netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
- err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
- if (err)
- netdev_err(netdev, "failed to set %d Tx queues\n",
- adapter->drv_tx_rings);
-
- return err;
-}
-
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 8a570fa3542b..d2e18b52caba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2224,6 +2224,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
ahw->max_uc_count = count;
}
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ u8 tx_queues, u8 rx_queues)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (tx_queues) {
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ tx_queues);
+ return err;
+ }
+ }
+
+ if (rx_queues) {
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev, "failed to set %d Rx queues\n",
+ rx_queues);
+ }
+
+ return err;
+}
+
int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int pci_using_dac)
@@ -2287,7 +2312,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
- err = qlcnic_set_real_num_queues(adapter, netdev);
+ err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
if (err)
return err;
@@ -2392,6 +2418,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_api_lock(adapter);
+ qlcnic_api_unlock(adapter);
+}
+
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2491,6 +2525,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_82xx_check(adapter)) {
qlcnic_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
+ qlcnic_reset_api_lock(adapter);
err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
@@ -2958,9 +2993,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
tx_ring->tx_stats.xmit_called,
tx_ring->tx_stats.xmit_on,
tx_ring->tx_stats.xmit_off);
+
+ if (tx_ring->crb_intr_mask)
+ netdev_info(netdev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+
netdev_info(netdev,
- "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
- readl(tx_ring->crb_intr_mask),
+ "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
readl(tx_ring->crb_cmd_producer),
tx_ring->producer, tx_ring->sw_consumer,
le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3993,12 +4032,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ u8 tx_rings, rx_rings;
int err;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ tx_rings = adapter->drv_tss_rings;
+ rx_rings = adapter->drv_rss_rings;
+
netif_device_detach(netdev);
+
+ err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+ if (err)
+ goto done;
+
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -4018,7 +4066,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
return err;
}
- netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+ /* Check if we need to update real_num_{tx|rx}_queues because
+ * qlcnic_setup_intr() may change the Tx/Rx ring sizes
+ */
+ if ((tx_rings != adapter->drv_tx_rings) ||
+ (rx_rings != adapter->drv_sds_rings)) {
+ err = qlcnic_set_real_num_queues(adapter,
+ adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ goto done;
+ }
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_initialize_nic(adapter, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 3b39ab2ad5e9..498fa6350c8d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -1376,7 +1376,7 @@ static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
rsp = qlcnic_sriov_alloc_bc_trans(&trans);
if (rsp)
- return rsp;
+ goto free_cmd;
rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
if (rsp)
@@ -1436,6 +1436,13 @@ err_out:
cleanup_transaction:
qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ }
+
return rsp;
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 6203c7d8550f..45019649bbbd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
/* Enable disable checksum offload operations */
void (*enable_rx_csum)(void __iomem *ioaddr);
void (*disable_rx_csum)(void __iomem *ioaddr);
+ void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
+ void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
};
const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index c4da7a2b002a..58c35692560e 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
}
+static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+ reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+ reg_val |= SXGBE_CORE_RXQ_ENABLE;
+ writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
+static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+ u32 reg_val;
+
+ reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+ reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+ reg_val |= SXGBE_CORE_RXQ_DISABLE;
+ writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
static void sxgbe_set_eee_mode(void __iomem *ioaddr)
{
u32 ctrl;
@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
.set_eee_pls = sxgbe_set_eee_pls,
.enable_rx_csum = sxgbe_enable_rx_csum,
.disable_rx_csum = sxgbe_disable_rx_csum,
+ .enable_rxqueue = sxgbe_core_enable_rxqueue,
+ .disable_rxqueue = sxgbe_core_disable_rxqueue,
};
const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
index d71691be4136..2686bb5b6765 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
p->rdes23.rx_rd_des23.own_bit = 1;
}
+/* Set Interrupt on completion bit */
+static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
+{
+ p->rdes23.rx_rd_des23.int_on_com = 1;
+}
+
/* Get the receive frame size */
static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
{
@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
.init_rx_desc = sxgbe_init_rx_desc,
.get_rx_owner = sxgbe_get_rx_owner,
.set_rx_owner = sxgbe_set_rx_owner,
+ .set_rx_int_on_com = sxgbe_set_rx_int_on_com,
.get_rx_frame_len = sxgbe_get_rx_frame_len,
.get_rx_fd_status = sxgbe_get_rx_fd_status,
.get_rx_ld_status = sxgbe_get_rx_ld_status,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
index 022630098aea..18609324db72 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
@@ -258,6 +258,9 @@ struct sxgbe_desc_ops {
/* Set own bit */
void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
+ /* Set Interrupt on completion bit */
+ void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
+
/* Get the receive frame size */
int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
index 4d989ff6c978..bb9b5b8afc5f 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
@@ -23,21 +23,8 @@
/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
- int retry_count = 10;
u32 reg_val;
- /* reset the DMA */
- writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
- while (retry_count--) {
- if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
- SXGBE_DMA_SOFT_RESET))
- break;
- mdelay(10);
- }
-
- if (retry_count < 0)
- return -EBUSY;
-
reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 137f366ec7e4..698494481d18 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
+ }
/* Request the IRQ lines */
ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
@@ -1452,6 +1455,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
/* Added memory barrier for RX descriptor modification */
wmb();
priv->hw->desc->set_rx_owner(p);
+ priv->hw->desc->set_rx_int_on_com(p);
/* Added memory barrier for RX descriptor modification */
wmb();
}
@@ -2034,6 +2038,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
return 0;
}
+static int sxgbe_sw_reset(void __iomem *addr)
+{
+ int retry_count = 10;
+
+ writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
+ while (retry_count--) {
+ if (!(readl(addr + SXGBE_DMA_MODE_REG) &
+ SXGBE_DMA_SOFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (retry_count < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
/**
* sxgbe_drv_probe
* @device: device pointer
@@ -2066,6 +2088,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
priv->plat = plat_dat;
priv->ioaddr = addr;
+ ret = sxgbe_sw_reset(priv->ioaddr);
+ if (ret)
+ goto error_free_netdev;
+
/* Verify driver arguments */
sxgbe_verify_args();
@@ -2182,9 +2208,14 @@ error_free_netdev:
int sxgbe_drv_remove(struct net_device *ndev)
{
struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ u8 queue_num;
netdev_info(ndev, "%s: removing driver\n", __func__);
+ SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+ priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
+ }
+
priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 5a89acb4c505..56f8bf5a3f1b 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -52,6 +52,10 @@
#define SXGBE_CORE_RX_CTL2_REG 0x00A8
#define SXGBE_CORE_RX_CTL3_REG 0x00AC
+#define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003
+#define SXGBE_CORE_RXQ_ENABLE 0x0002
+#define SXGBE_CORE_RXQ_DISABLE 0x0000
+
/* Interrupt Registers */
#define SXGBE_CORE_INT_STATUS_REG 0x00B0
#define SXGBE_CORE_INT_ENABLE_REG 0x00B4
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
int i;
for (i = 0; i < N_TX_RINGS; i++)
- spin_lock(&cp->tx_lock[i]);
+ spin_lock_nested(&cp->tx_lock[i], i);
}
static inline void cas_lock_all(struct cas *cp)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d14c8da53160..91499be03c6f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1899,18 +1899,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
-
- if (strncmp(mdio->name, "gpio", 4) == 0) {
- /* GPIO bitbang MDIO driver attached */
- struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, bus->id, phyid);
- } else {
- /* davinci MDIO driver attached */
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+ of_node_put(mdio_node);
+ if (!mdio) {
+ pr_err("Missing mdio platform device\n");
+ return -EINVAL;
}
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 083d084396d3..1de3ef5dd5d2 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -467,6 +467,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
if (skb_is_gso(skb))
goto do_lso;
+ if ((skb->ip_summed == CHECKSUM_NONE) ||
+ (skb->ip_summed == CHECKSUM_UNNECESSARY))
+ goto do_send;
+
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index cfb27c865417..f0118d1a3e46 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -332,11 +332,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
- __u8 ip_summed = skb->ip_summed;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
/* send to other bridge ports directly */
if (is_multicast_ether_addr(eth->h_dest)) {
@@ -354,7 +352,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
xmit_world:
- skb->ip_summed = ip_summed;
skb->dev = vlan->lowerdev;
return dev_queue_xmit(skb);
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ff111a89e17f..3381c4f91a8c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
segs = nskb;
}
} else {
+ /* If we receive a partial checksum and the tap side
+ * doesn't support checksum offload, compute the checksum.
+ * Note: it doesn't matter which checksum feature to
+ * check, we either support them all or none.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ !(features & NETIF_F_ALL_CSUM) &&
+ skb_checksum_help(skb))
+ goto drop;
skb_queue_tail(&q->sk.sk_receive_queue, skb);
}
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
pdata = mdio_gpio_of_get_data(pdev);
bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+ if (bus_id < 0) {
+ dev_warn(&pdev->dev, "failed to get alias id\n");
+ bus_id = 0;
+ }
} else {
pdata = dev_get_platdata(&pdev->dev);
bus_id = pdev->id;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1b6d09aef427..a972056b2249 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
break;
if (phydev->link) {
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ err = phy_aneg_done(phydev);
+ if (err < 0)
+ break;
+
+ if (!err) {
+ phydev->state = PHY_AN;
+ phydev->link_timeout = PHY_AN_TIMEOUT;
+ break;
+ }
+ }
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index cc70ecfc7062..ad4a94e9ff57 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
return;
- spin_lock(&sl->lock);
+ spin_lock_bh(&sl->lock);
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_unlock(&sl->lock);
+ spin_unlock_bh(&sl->lock);
sl_unlock(sl);
return;
}
@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
actual = tty->ops->write(tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
- spin_unlock(&sl->lock);
+ spin_unlock_bh(&sl->lock);
}
static void sl_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 33008c1d1d67..767fe61b5ac9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_UP:
if (netif_carrier_ok(dev))
team_port_change_check(port, true);
+ break;
case NETDEV_DOWN:
team_port_change_check(port, false);
+ break;
case NETDEV_CHANGE:
if (netif_running(port->dev))
team_port_change_check(port,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
cdc_ncm_unbind(dev, intf);
}
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ return true;
+ }
+ return false;
+}
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
struct cdc_ncm_ctx *ctx = info->ctx;
__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
u16 tci = 0;
+ bool is_ip;
u8 *c;
if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
if (skb->len <= ETH_HLEN)
goto error;
+ /* Some applications using e.g. packet sockets will
+ * bypass the VLAN acceleration and create tagged
+ * Ethernet frames directly. We primarily look for
+ * the accelerated out-of-band tag, but fall back if
+ * required.
+ */
+ skb_reset_mac_header(skb);
+ if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+ __vlan_get_tag(skb, &tci) == 0) {
+ is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+ skb_pull(skb, VLAN_ETH_HLEN);
+ } else {
+ is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+ skb_pull(skb, ETH_HLEN);
+ }
+
/* mapping VLANs to MBIM sessions:
* no tag => IPS session <0>
* 1 - 255 => IPS session <vlanid>
* 256 - 511 => DSS session <vlanid - 256>
* 512 - 4095 => unsupported, drop
*/
- vlan_get_tag(skb, &tci);
-
switch (tci & 0x0f00) {
case 0x0000: /* VLAN ID 0 - 255 */
- /* verify that datagram is IPv4 or IPv6 */
- skb_reset_mac_header(skb);
- switch (eth_hdr(skb)->h_proto) {
- case htons(ETH_P_IP):
- case htons(ETH_P_IPV6):
- break;
- default:
+ if (!is_ip)
goto error;
- }
c = (u8 *)&sign;
c[3] = tci;
break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
"unsupported tci=0x%04x\n", tci);
goto error;
}
- skb_pull(skb, ETH_HLEN);
}
spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
return;
/* need to send the NA on the VLAN dev, if any */
- if (tci)
+ rcu_read_lock();
+ if (tci) {
netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
tci);
- else
+ if (!netdev) {
+ rcu_read_unlock();
+ return;
+ }
+ } else {
netdev = dev->net;
- if (!netdev)
- return;
+ }
+ dev_hold(netdev);
+ rcu_read_unlock();
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
- return;
+ goto out;
is_router = !!in6_dev->cnf.forwarding;
in6_dev_put(in6_dev);
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
true /* solicited */,
false /* override */,
true /* inc_opt */);
+out:
+ dev_put(netdev);
}
static bool is_neigh_solicit(u8 *buf, size_t len)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 549dbac710ed..9a2bd11943eb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -785,7 +785,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
skb_out->len > CDC_NCM_MIN_TX_PKT)
memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
ctx->tx_max - skb_out->len);
- else if ((skb_out->len % dev->maxpacket) == 0)
+ else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
*skb_put(skb_out, 1) = 0; /* force short packet */
/* set final frame length */
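The hunk above stops appending the short-packet byte when the NCM buffer is already filled to tx_max. A simplified, standalone model of that decision follows; the function name and the example sizes are invented, and the driver's additional checks around zero-padding are omitted.

#include <stddef.h>
#include <stdio.h>

/* Returns the length the buffer should have before it is handed to the
 * USB layer.  Padding to tx_max and the single extra byte are mutually
 * exclusive; a buffer already at tx_max is sent untouched.
 */
static size_t ncm_finalize_len(size_t len, size_t tx_max, size_t maxpacket,
                               size_t min_tx_pkt)
{
        if (len > min_tx_pkt && len < tx_max)
                return tx_max;                    /* zero-pad up to tx_max */
        if (len < tx_max && (len % maxpacket) == 0)
                return len + 1;                   /* force a short packet */
        return len;                               /* e.g. len == tx_max */
}

int main(void)
{
        printf("%zu\n", ncm_finalize_len(1024, 16384, 512, 512));   /* 16384 */
        printf("%zu\n", ncm_finalize_len(16384, 16384, 512, 512));  /* 16384 */
        printf("%zu\n", ncm_finalize_len(512, 16384, 512, 512));    /* 513 */
        return 0;
}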
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e3458e3c44f1..83208d4fdc59 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
+ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
+ {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */
+ {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */
+ {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */
+ {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */
+ {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */
+ {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */
+ {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */
+ {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */
+ {QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */
+ {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */
+ {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */
+ {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */
+ {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,28 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */
+ {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
{QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
+ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index a0398fe3eb28..be3eb2a8d602 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
int irq;
int ret = 0;
struct ath_hw *ah;
- struct ath_common *common;
char hw_name[64];
if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)mem, irq);
- common = ath9k_hw_common(sc->sc_ah);
- /* Will be cleared in ath9k_start() */
- set_bit(ATH_OP_INVALID, &common->op_flags);
return 0;
err_irq:
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 6d47783f2e5b..ba502a2d199b 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
ATH9K_ANI_RSSI_THR_LOW,
ATH9K_ANI_RSSI_THR_HIGH);
+ if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
+ immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
if (!scan)
aniState->ofdmNoiseImmunityLevel = immunityLevel;
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
ATH9K_ANI_RSSI_THR_HIGH);
+ if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
+ immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+
if (ah->opmode == NL80211_IFTYPE_STATION &&
BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 05935f638525..33a2ae77b595 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -254,7 +254,6 @@ struct ath_atx_tid {
s8 bar_index;
bool sched;
- bool paused;
bool active;
};
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index d76e6e0120d2..ffca918ff16a 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
ath_txq_lock(sc, txq);
if (tid->active) {
len += scnprintf(buf + len, size - len,
- "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+ "%3d%11d%10d%10d%10d%10d%9d%6d\n",
tid->tidno,
tid->seq_start,
tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
tid->baw_head,
tid->baw_tail,
tid->bar_index,
- tid->sched,
- tid->paused);
+ tid->sched);
}
ath_txq_unlock(sc, txq);
}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 21e174cfc909..4243509616bd 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -786,6 +786,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
common = ath9k_hw_common(ah);
ath9k_set_hw_capab(sc, hw);
+ /* Will be cleared in ath9k_start() */
+ set_bit(ATH_OP_INVALID, &common->op_flags);
+
/* Initialize regulatory */
error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
ath9k_reg_notifier);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 25304adece57..914dbc6b1720 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ath_softc *sc;
struct ieee80211_hw *hw;
- struct ath_common *common;
u8 csz;
u32 val;
int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
hw_name, (unsigned long)sc->mem, pdev->irq);
- /* Will be cleared in ath9k_start() */
- common = ath9k_hw_common(sc->sc_ah);
- set_bit(ATH_OP_INVALID, &common->op_flags);
-
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a01efd3e741e..441c71448e4c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -978,6 +978,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
u64 tsf = 0;
unsigned long flags;
dma_addr_t new_buf_addr;
+ unsigned int budget = 512;
if (edma)
dma_type = DMA_BIDIRECTIONAL;
@@ -1116,15 +1117,17 @@ requeue_drop_frag:
}
requeue:
list_add_tail(&bf->list, &sc->rx.rxbuf);
- if (flush)
- continue;
if (edma) {
ath_rx_edma_buf_link(sc, qtype);
} else {
ath_rx_buf_relink(sc, bf);
- ath9k_hw_rxena(ah);
+ if (!flush)
+ ath9k_hw_rxena(ah);
}
+
+ if (!budget--)
+ break;
} while (1);
if (!(ah->imask & ATH9K_INT_RXEOL)) {
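The budget counter added above bounds how many frames one pass of the RX tasklet may process. A runnable sketch of the same bounded-work pattern; more_work() and process_one() are stand-ins, not ath9k functions.

#include <stdbool.h>
#include <stdio.h>

static int pending = 1000;                      /* pretend backlog */

static bool more_work(void) { return pending > 0; }
static void process_one(void) { pending--; }

/* Process at most `budget` items per invocation so one busy queue
 * cannot monopolize the CPU; leftovers are handled on the next run.
 */
static void poll_once(unsigned int budget)
{
        while (budget-- && more_work())
                process_one();
}

int main(void)
{
        poll_once(512);
        printf("left after one pass: %d\n", pending);   /* 488 */
        return 0;
}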
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 87cbec47fb48..66acb2cbd9df 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
- if (tid->paused)
- return;
-
if (tid->sched)
return;
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
ath_tx_tid_change_state(sc, txtid);
txtid->active = true;
- txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
txtid->bar_index = -1;
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
ath_txq_lock(sc, txq);
txtid->active = false;
- txtid->paused = false;
ath_tx_flush_tid(sc, txtid);
ath_tx_tid_change_state(sc, txtid);
ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
- if (!tid->paused && ath_tid_has_buffered(tid)) {
+ if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
ath_txq_lock(sc, txq);
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
- tid->paused = false;
if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
continue;
tid = ATH_AN_2_TID(an, i);
- if (tid->paused)
- continue;
ath_txq_lock(sc, tid->ac->txq);
while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
list_del(&tid->list);
tid->sched = false;
- if (tid->paused)
- continue;
-
if (ath_tx_sched_aggr(sc, txq, tid, &stop))
sent = true;
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->baw_size = WME_MAX_BA;
tid->baw_head = tid->baw_tail = 0;
tid->sched = false;
- tid->paused = false;
tid->active = false;
__skb_queue_head_init(&tid->buf_q);
__skb_queue_head_init(&tid->retry_q);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
index df130ef53d1c..c7c9f15c0fe0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
ci = core->chip;
- /* if core is already in reset, just return */
+ /* if core is already in reset, skip reset */
regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
- return;
+ goto in_reset_configure;
/* configure reset */
ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
BCMA_RESET_CTL_RESET, 300);
+in_reset_configure:
/* in-reset configure */
ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 30a2367ba8d6..212ac4842c16 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -621,20 +621,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
/*
- * Update the beacon. This is only required on USB devices. PCI
- * devices fetch beacons periodically.
- */
- if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
- rt2x00queue_update_beacon(rt2x00dev, vif);
-
- /*
* Start/stop beaconing.
*/
if (changes & BSS_CHANGED_BEACON_ENABLED) {
if (!bss_conf->enable_beacon && intf->enable_beacon) {
- rt2x00queue_clear_beacon(rt2x00dev, vif);
rt2x00dev->intf_beaconing--;
intf->enable_beacon = false;
+ /*
+ * Clear beacon in the H/W for this vif. This is needed
+ * to disable beaconing on this particular interface
+ * and keep it running on other interfaces.
+ */
+ rt2x00queue_clear_beacon(rt2x00dev, vif);
if (rt2x00dev->intf_beaconing == 0) {
/*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
rt2x00queue_stop_queue(rt2x00dev->bcn);
mutex_unlock(&intf->beacon_skb_mutex);
}
-
-
} else if (bss_conf->enable_beacon && !intf->enable_beacon) {
rt2x00dev->intf_beaconing++;
intf->enable_beacon = true;
+ /*
+ * Upload beacon to the H/W. This is only required on
+ * USB devices. PCI devices fetch beacons periodically.
+ */
+ if (rt2x00_is_usb(rt2x00dev))
+ rt2x00queue_update_beacon(rt2x00dev, vif);
if (rt2x00dev->intf_beaconing == 1) {
/*
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index 06ef47cd6203..5b4c225396f2 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *psaddr;
__le16 fc;
u16 type, ufc;
- bool match_bssid, packet_toself, packet_beacon, addr;
+ bool match_bssid, packet_toself, packet_beacon = false, addr;
tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 68b5c7e92cfb..07cb06da6729 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
err = _rtl92cu_init_mac(hw);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
- return err;
+ goto exit;
}
err = rtl92c_download_fw(hw);
if (err) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 36b48be8329c..2b3c78baa9f8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
if (ieee80211_is_nullfunc(fc))
return QSLT_HIGH;
+ /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
+ * queue V0 at priority 7; however, the RTL8192SE appears to have
+ * that queue at priority 6
+ */
+ if (skb->priority == 7)
+ return QSLT_VO;
return skb->priority;
}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 9bcf2cf19357..5aeb89411350 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
memset(r, 0, sizeof(*r));
/*
- * Get optional "interrupts-names" property to add a name
+ * Get optional "interrupt-names" property to add a name
* to the resource.
*/
of_property_read_string_index(dev, "interrupt-names", index,
@@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ *
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+ int rc;
+ struct of_phandle_args oirq;
+ struct irq_domain *domain;
+
+ rc = of_irq_parse_one(dev, index, &oirq);
+ if (rc)
+ return rc;
+
+ domain = irq_find_host(oirq.np);
+ if (!domain)
+ return -EPROBE_DEFER;
+
+ return irq_create_of_mapping(&oirq);
+}
+
+/**
* of_irq_count - Count the number of IRQs a node uses
* @dev: pointer to device tree node
*/
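of_irq_get() documents a contract where -EPROBE_DEFER means "try again later, the dependency is not registered yet". Below is a userspace model of that contract only, an analogy rather than kernel code; fake_of_irq_get() and the retry in main() are invented for illustration (517 is the kernel's numeric value for EPROBE_DEFER).

#include <stdbool.h>
#include <stdio.h>

#define EPROBE_DEFER 517

static bool irq_domain_registered;

static int fake_of_irq_get(void)
{
        if (!irq_domain_registered)
                return -EPROBE_DEFER;   /* parsing ok, mapping not possible yet */
        return 42;                      /* a valid "Linux irq number" */
}

static int probe(void)
{
        int irq = fake_of_irq_get();

        if (irq < 0)
                return irq;             /* driver core re-queues us on defer */
        printf("probed with irq %d\n", irq);
        return 0;
}

int main(void)
{
        if (probe() == -EPROBE_DEFER)
                printf("deferred, retrying after domain registration\n");
        irq_domain_registered = true;
        return probe();
}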
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 404d1daebefa..bd47fbc53dc9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
rc = of_address_to_resource(np, i, res);
WARN_ON(rc);
}
- WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+ if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+ pr_debug("not all legacy IRQ resources mapped for %s\n",
+ np->name);
}
dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index ae4450070503..fe70b86bcffb 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
}
}
+static void __init of_selftest_platform_populate(void)
+{
+ int irq;
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = of_find_node_by_path("/testcase-data");
+ of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+ /* Test that a missing irq domain returns -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device1");
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ selftest(0, "device 1 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq != -EPROBE_DEFER)
+ selftest(0, "device deferred probe failed - %d\n", irq);
+
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ selftest(0, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 || irq == -EPROBE_DEFER)
+ selftest(0, "device parsing error failed - %d\n", irq);
+
+ selftest(1, "passed");
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -445,6 +476,7 @@ static int __init of_selftest(void)
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
of_selftest_match_node();
+ of_selftest_platform_populate();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
index c843720bd3e5..da4695f60351 100644
--- a/drivers/of/testcase-data/tests-interrupts.dtsi
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
@@ -54,5 +54,18 @@
<&test_intmap1 1 2>;
};
};
+
+ testcase-device1 {
+ compatible = "testcase-device";
+ interrupt-parent = <&test_intc0>;
+ interrupts = <1>;
+ };
+
+ testcase-device2 {
+ compatible = "testcase-device";
+ interrupt-parent = <&test_intc2>;
+ interrupts = <1>; /* invalid specifier - too short */
+ };
};
+
};
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 3bb05f17b9b4..4906c27fa3bd 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -33,6 +33,7 @@ config PHY_MVEBU_SATA
config OMAP_CONTROL_PHY
tristate "OMAP CONTROL PHY Driver"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
Enable this to add support for the PHY part present in the control
module. This driver has API to power on the USB2 PHY and to write to
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 2faf78edc864..7728518572a4 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
-obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-samsung-usb2.o
-obj-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
-obj-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
-obj-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
+obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
+phy-exynos-usb2-y += phy-samsung-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 623b71c54b3e..c64a2f3b2d62 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -64,6 +64,9 @@ static struct phy *phy_lookup(struct device *device, const char *port)
class_dev_iter_init(&iter, phy_class, NULL, NULL);
while ((dev = class_dev_iter_next(&iter))) {
phy = to_phy(dev);
+
+ if (!phy->init_data)
+ continue;
count = phy->init_data->num_consumers;
consumers = phy->init_data->consumers;
while (count--) {
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 92ed4b2e3c07..c862f9c0e9ce 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -64,7 +64,6 @@ struct as3722_pin_function {
};
struct as3722_gpio_pin_control {
- bool enable_gpio_invert;
unsigned mode_prop;
int io_function;
};
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
return mode;
}
- if (as_pci->gpio_control[offset].enable_gpio_invert)
- mode |= AS3722_GPIO_INV;
-
- return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+ return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
+ AS3722_GPIO_MODE_MASK, mode);
}
static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
{
struct as3722_pctrl_info *as_pci = to_as_pci(chip);
struct as3722 *as3722 = as_pci->as3722;
- int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+ int en_invert;
u32 val;
int ret;
+ ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
+ if (ret < 0) {
+ dev_err(as_pci->dev,
+ "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+ return;
+ }
+ en_invert = !!(val & AS3722_GPIO_INV);
+
if (value)
val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
else
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 81075f2a1d3f..2960557bfed9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
unsigned pin_pos)
{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
struct pinctrl_pin_desc *pin;
struct pcs_name *pn;
int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
return -ENOMEM;
}
+ if (pcs_soc->irq_enable_mask) {
+ unsigned val;
+
+ val = pcs->read(pcs->base + offset);
+ if (val & pcs_soc->irq_enable_mask) {
+ dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+ (unsigned long)pcs->res->start + offset, val);
+ val &= ~pcs_soc->irq_enable_mask;
+ pcs->write(val, pcs->base + offset);
+ }
+ }
+
pin = &pcs->pins.pa[i];
pn = &pcs->names[i];
sprintf(pn->name, "%lx.%d",
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index c5e0f6973a3b..26ca6855f478 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
*/
for (i = 0; i < state->pinfuncgrpcnt; i++) {
const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
- unsigned int port = pfg->port;
unsigned int mode = pfg->mode;
- int j;
+ int j, port = pfg->port;
/*
* Skip pin groups which are always mapped and don't need
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 48093719167a..f5cd3f961808 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MSIOF0_SCK_B, 0,
/* IP5_23_21 [3] */
FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
- FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
- FN_IERX_C, 0,
+ FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
/* IP5_20_18 [3] */
FN_WE0_N, FN_IECLK, FN_CAN_CLK,
FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 5186d70c49d4..7868bf3a0f91 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SCIF3 [2] */
FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
/* SEL_IEB [2] */
- FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
/* SEL_MMC [1] */
FN_SEL_MMC_0, FN_SEL_MMC_1,
/* SEL_SCIF5 [1] */
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9f611cbbc294..c31aa07b3ba5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
acpi_handle handle;
- struct acpi_buffer buffer;
- int ret;
+ int ret = 0;
pnp_dbg(&dev->dev, "set resources\n");
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
if (WARN_ON_ONCE(acpi_dev != dev->data))
dev->data = acpi_dev;
- ret = pnpacpi_build_resource_template(dev, &buffer);
- if (ret)
- return ret;
- ret = pnpacpi_encode_resources(dev, &buffer);
- if (ret) {
+ if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+ struct acpi_buffer buffer;
+
+ ret = pnpacpi_build_resource_template(dev, &buffer);
+ if (ret)
+ return ret;
+
+ ret = pnpacpi_encode_resources(dev, &buffer);
+ if (!ret) {
+ acpi_status status;
+
+ status = acpi_set_current_resources(handle, &buffer);
+ if (ACPI_FAILURE(status))
+ ret = -EIO;
+ }
kfree(buffer.pointer);
- return ret;
}
- if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
- ret = -EINVAL;
- else if (acpi_bus_power_manageable(handle))
+ if (!ret && acpi_bus_power_manageable(handle))
ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
- kfree(buffer.pointer);
+
return ret;
}
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
acpi_handle handle;
- int ret;
+ acpi_status status;
dev_dbg(&dev->dev, "disable resources\n");
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
}
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
- ret = 0;
if (acpi_bus_power_manageable(handle))
acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
- /* continue even if acpi_bus_set_power() fails */
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
- ret = -ENODEV;
- return ret;
+
+ /* continue even if acpi_bus_set_power() fails */
+ status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ return -ENODEV;
+
+ return 0;
}
#ifdef CONFIG_ACPI_SLEEP
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 258fef272ea7..ebf0d6710b5a 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pnp.h>
@@ -334,6 +335,81 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
}
#endif
+#ifdef CONFIG_PCI
+/* Device IDs of parts that have 32KB MCH space */
+static const unsigned int mch_quirk_devices[] = {
+ 0x0154, /* Ivy Bridge */
+ 0x0c00, /* Haswell */
+};
+
+static struct pci_dev *get_intel_host(void)
+{
+ int i;
+ struct pci_dev *host;
+
+ for (i = 0; i < ARRAY_SIZE(mch_quirk_devices); i++) {
+ host = pci_get_device(PCI_VENDOR_ID_INTEL, mch_quirk_devices[i],
+ NULL);
+ if (host)
+ return host;
+ }
+ return NULL;
+}
+
+static void quirk_intel_mch(struct pnp_dev *dev)
+{
+ struct pci_dev *host;
+ u32 addr_lo, addr_hi;
+ struct pci_bus_region region;
+ struct resource mch;
+ struct pnp_resource *pnp_res;
+ struct resource *res;
+
+ host = get_intel_host();
+ if (!host)
+ return;
+
+ /*
+ * MCHBAR is not an architected PCI BAR, so MCH space is usually
+ * reported as a PNP0C02 resource. The MCH space was originally
+ * 16KB, but is 32KB in newer parts. Some BIOSes still report a
+ * PNP0C02 resource that is only 16KB, which means the rest of the
+ * MCH space is consumed but unreported.
+ */
+
+ /*
+ * Read MCHBAR for Host Member Mapped Register Range Base
+ * https://www-ssl.intel.com/content/www/us/en/processors/core/4th-gen-core-family-desktop-vol-2-datasheet
+ * Sec 3.1.12.
+ */
+ pci_read_config_dword(host, 0x48, &addr_lo);
+ region.start = addr_lo & ~0x7fff;
+ pci_read_config_dword(host, 0x4c, &addr_hi);
+ region.start |= (u64) addr_hi << 32;
+ region.end = region.start + 32*1024 - 1;
+
+ memset(&mch, 0, sizeof(mch));
+ mch.flags = IORESOURCE_MEM;
+ pcibios_bus_to_resource(host->bus, &mch, &region);
+
+ list_for_each_entry(pnp_res, &dev->resources, list) {
+ res = &pnp_res->res;
+ if (res->end < mch.start || res->start > mch.end)
+ continue; /* no overlap */
+ if (res->start == mch.start && res->end == mch.end)
+ continue; /* exact match */
+
+ dev_info(&dev->dev, FW_BUG "PNP resource %pR covers only part of %s Intel MCH; extending to %pR\n",
+ res, pci_name(host), &mch);
+ res->start = mch.start;
+ res->end = mch.end;
+ break;
+ }
+
+ pci_dev_put(host);
+}
+#endif
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -364,6 +440,9 @@ static struct pnp_fixup pnp_fixups[] = {
#ifdef CONFIG_AMD_NB
{"PNP0c01", quirk_amd_mmconfig_area},
#endif
+#ifdef CONFIG_PCI
+ {"PNP0c02", quirk_intel_mch},
+#endif
{""}
};
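The quirk above reassembles MCHBAR from two 32-bit config dwords and treats the window as 32KB. The address math, isolated into a standalone sketch; mchbar_region() is an invented helper, and the 0x48/0x4c offsets come from the datasheet reference in the comment.

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, end; };

static struct region mchbar_region(uint32_t addr_lo, uint32_t addr_hi)
{
        struct region r;

        r.start = addr_lo & ~0x7fffu;           /* mask enable/low bits */
        r.start |= (uint64_t)addr_hi << 32;
        r.end = r.start + 32 * 1024 - 1;        /* 32KB window */
        return r;
}

int main(void)
{
        struct region r = mchbar_region(0xfed10001, 0);

        printf("0x%llx-0x%llx\n", (unsigned long long)r.start,
               (unsigned long long)r.end);
        return 0;
}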
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 476aa495c110..b95cf71ed695 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -11,7 +11,7 @@
* Copyright (C) 2012 ARM Limited
*/
-#include <linux/jiffies.h>
+#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -23,17 +23,12 @@
static void vexpress_reset_do(struct device *dev, const char *what)
{
int err = -ENOENT;
- struct vexpress_config_func *func =
- vexpress_config_func_get_by_dev(dev);
+ struct vexpress_config_func *func = dev_get_drvdata(dev);
if (func) {
- unsigned long timeout;
-
err = vexpress_config_write(func, 0, 0);
-
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout))
- cpu_relax();
+ if (!err)
+ mdelay(1000);
}
dev_emerg(dev, "Unable to %s (%d)\n", what, err);
@@ -96,12 +91,18 @@ static int vexpress_reset_probe(struct platform_device *pdev)
enum vexpress_reset_func func;
const struct of_device_id *match =
of_match_device(vexpress_reset_of_match, &pdev->dev);
+ struct vexpress_config_func *config_func;
if (match)
func = (enum vexpress_reset_func)match->data;
else
func = pdev->id_entry->driver_data;
+ config_func = vexpress_config_func_get_by_dev(&pdev->dev);
+ if (!config_func)
+ return -EINVAL;
+ dev_set_drvdata(&pdev->dev, config_func);
+
switch (func) {
case FUNC_SHUTDOWN:
vexpress_power_off_device = &pdev->dev;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
+ depends on NET
select PPS
select NET_PTP_CLASSIFY
help
@@ -74,7 +75,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86 || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && NET
select PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index ded3b3574209..6d38be3d970c 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -38,66 +38,24 @@ struct pbias_reg_info {
struct pbias_regulator_data {
struct regulator_desc desc;
void __iomem *pbias_addr;
- unsigned int pbias_reg;
struct regulator_dev *dev;
struct regmap *syscon;
const struct pbias_reg_info *info;
int voltage;
};
-static int pbias_regulator_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV, unsigned *selector)
-{
- struct pbias_regulator_data *data = rdev_get_drvdata(dev);
- const struct pbias_reg_info *info = data->info;
- int ret, vmode;
-
- if (min_uV <= 1800000)
- vmode = 0;
- else if (min_uV > 1800000)
- vmode = info->vmode;
-
- ret = regmap_update_bits(data->syscon, data->pbias_reg,
- info->vmode, vmode);
-
- return ret;
-}
-
-static int pbias_regulator_get_voltage(struct regulator_dev *rdev)
-{
- struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
- const struct pbias_reg_info *info = data->info;
- int value, voltage;
-
- regmap_read(data->syscon, data->pbias_reg, &value);
- value &= info->vmode;
-
- voltage = value ? 3000000 : 1800000;
-
- return voltage;
-}
+static const unsigned int pbias_volt_table[] = {
+ 1800000,
+ 3000000
+};
static int pbias_regulator_enable(struct regulator_dev *rdev)
{
struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
const struct pbias_reg_info *info = data->info;
- int ret;
-
- ret = regmap_update_bits(data->syscon, data->pbias_reg,
- info->enable_mask, info->enable);
-
- return ret;
-}
-
-static int pbias_regulator_disable(struct regulator_dev *rdev)
-{
- struct pbias_regulator_data *data = rdev_get_drvdata(rdev);
- const struct pbias_reg_info *info = data->info;
- int ret;
- ret = regmap_update_bits(data->syscon, data->pbias_reg,
- info->enable_mask, 0);
- return ret;
+ return regmap_update_bits(data->syscon, rdev->desc->enable_reg,
+ info->enable_mask, info->enable);
}
static int pbias_regulator_is_enable(struct regulator_dev *rdev)
@@ -106,17 +64,18 @@ static int pbias_regulator_is_enable(struct regulator_dev *rdev)
const struct pbias_reg_info *info = data->info;
int value;
- regmap_read(data->syscon, data->pbias_reg, &value);
+ regmap_read(data->syscon, rdev->desc->enable_reg, &value);
- return (value & info->enable_mask) == info->enable_mask;
+ return (value & info->enable_mask) == info->enable;
}
static struct regulator_ops pbias_regulator_voltage_ops = {
- .set_voltage = pbias_regulator_set_voltage,
- .get_voltage = pbias_regulator_get_voltage,
- .enable = pbias_regulator_enable,
- .disable = pbias_regulator_disable,
- .is_enabled = pbias_regulator_is_enable,
+ .list_voltage = regulator_list_voltage_table,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = pbias_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .is_enabled = pbias_regulator_is_enable,
};
static const struct pbias_reg_info pbias_mmc_omap2430 = {
@@ -192,6 +151,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (IS_ERR(syscon))
return PTR_ERR(syscon);
+ cfg.regmap = syscon;
cfg.dev = &pdev->dev;
for (idx = 0; idx < PBIAS_NUM_REGS && data_idx < count; idx++) {
@@ -207,15 +167,19 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (!res)
return -EINVAL;
- drvdata[data_idx].pbias_reg = res->start;
drvdata[data_idx].syscon = syscon;
drvdata[data_idx].info = info;
drvdata[data_idx].desc.name = info->name;
drvdata[data_idx].desc.owner = THIS_MODULE;
drvdata[data_idx].desc.type = REGULATOR_VOLTAGE;
drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops;
+ drvdata[data_idx].desc.volt_table = pbias_volt_table;
drvdata[data_idx].desc.n_voltages = 2;
drvdata[data_idx].desc.enable_time = info->enable_time;
+ drvdata[data_idx].desc.vsel_reg = res->start;
+ drvdata[data_idx].desc.vsel_mask = info->vmode;
+ drvdata[data_idx].desc.enable_reg = res->start;
+ drvdata[data_idx].desc.enable_mask = info->enable_mask;
cfg.init_data = pbias_matches[idx].init_data;
cfg.driver_data = &drvdata[data_idx];
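The regulator now describes its two voltages through a selector table and lets the regmap helpers do the register work. A standalone sketch of the per-selector lookup the core performs; list_voltage() is an invented stand-in for regulator_list_voltage_table().

#include <stdio.h>

static const unsigned int pbias_volt_table[] = { 1800000, 3000000 };

/* The selector is simply an index into the table. */
static int list_voltage(unsigned int selector)
{
        if (selector >= sizeof(pbias_volt_table) / sizeof(pbias_volt_table[0]))
                return -1;              /* the kernel helper returns -EINVAL */
        return (int)pbias_volt_table[selector];
}

int main(void)
{
        printf("%d %d %d\n", list_voltage(0), list_voltage(1), list_voltage(2));
        return 0;
}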
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9f0ea6cb6922..e3bf885f4a6c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
- do {
+ static int ntsm_unsupported;
+
+ while (true) {
memset(sei, 0, sizeof(*sei));
sei->request.length = 0x0010;
sei->request.code = 0x000e;
- sei->ntsm = ntsm;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
if (chsc(sei))
break;
if (sei->response.code != 0x0001) {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei->response.code);
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
break;
}
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
break;
}
- } while (sei->u.nt0_area.flags & 0x80);
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
}
/*
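The chsc change above retries the store-event-information request without the NT selection mask once old firmware rejects it with response code 3. A runnable sketch of that try-then-fall-back pattern, with fw_call() as a stand-in for the real channel subsystem call.

#include <stdbool.h>
#include <stdio.h>

static int fw_call(unsigned long long param)
{
        return param ? 3 : 1;           /* pretend old firmware: param unsupported */
}

static int issue_request(unsigned long long param)
{
        static bool param_unsupported;
        int rc;

        while (true) {
                rc = fw_call(param_unsupported ? 0 : param);
                if (rc == 3 && param && !param_unsupported) {
                        param_unsupported = true;       /* fall back, retry once */
                        continue;
                }
                return rc;
        }
}

int main(void)
{
        printf("rc=%d\n", issue_request(1));    /* succeeds on the fallback path */
        return 0;
}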
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8cf4a0c69baf..9a6e4a2cd072 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -7463,6 +7463,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
if (hpsa_simple_mode)
return;
+ trans_support = readl(&(h->cfgtable->TransportSupport));
+ if (!(trans_support & PERFORMANT_MODE))
+ return;
+
/* Check for I/O accelerator mode support */
if (trans_support & CFGTBL_Trans_io_accel1) {
transMethod |= CFGTBL_Trans_io_accel1 |
@@ -7479,10 +7483,6 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
}
/* TODO, check that this next line h->nreply_queues is correct */
- trans_support = readl(&(h->cfgtable->TransportSupport));
- if (!(trans_support & PERFORMANT_MODE))
- return;
-
h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
hpsa_get_max_perf_mode_cmds(h);
/* Performant mode ring buffer and supporting data structures */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7f0af4fcc001..6fd7d40b2c4d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt2sas_base_free_resources(ioc);
pci_save_state(pdev);
- pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
return 0;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 771c16bfdbac..f17aa7aa7879 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -189,6 +189,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
/*
* Retry after abort failed, escalate to next level.
*/
+ scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"scmd %p previous abort failed\n", scmd));
@@ -920,10 +921,12 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
ses->prot_op = scmd->prot_op;
scmd->prot_op = SCSI_PROT_NORMAL;
+ scmd->eh_eflags = 0;
scmd->cmnd = ses->eh_cmnd;
memset(scmd->cmnd, 0, BLK_MAX_CDB);
memset(&scmd->sdb, 0, sizeof(scmd->sdb));
scmd->request->next_rq = NULL;
+ scmd->result = 0;
if (sense_bytes) {
scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1157,6 +1160,15 @@ int scsi_eh_get_sense(struct list_head *work_q,
__func__));
break;
}
+ if (status_byte(scmd->result) != CHECK_CONDITION)
+ /*
+ * don't request sense if there's no check condition
+ * status because the error we're processing isn't one
+ * that has a sense code (and some devices get
+ * confused by sense requests out of the blue)
+ */
+ continue;
+
SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
"%s: requesting sense\n",
current->comm));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 65a123d9c676..9db097a28a74 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -137,6 +137,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
* lock such that the kblockd_schedule_work() call happens
* before blk_cleanup_queue() finishes.
*/
+ cmd->result = 0;
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
kblockd_schedule_work(q, &device->requeue_work);
@@ -1044,6 +1045,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
*/
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
+ struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request;
int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
@@ -1091,7 +1093,7 @@ err_exit:
scsi_release_buffers(cmd);
cmd->request->special = NULL;
scsi_put_command(cmd);
- put_device(&cmd->device->sdev_gendev);
+ put_device(&sdev->sdev_gendev);
return error;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1273,7 +1275,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
struct scsi_cmnd *cmd = req->special;
scsi_release_buffers(cmd);
scsi_put_command(cmd);
- put_device(&cmd->device->sdev_gendev);
+ put_device(&sdev->sdev_gendev);
req->special = NULL;
}
break;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index fe30ea94ffe6..109802f776ed 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
goto next_msg;
}
- if (!capable(CAP_SYS_ADMIN)) {
+ if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
err = -EPERM;
goto next_msg;
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 16bfd50cd3fe..db3b494e5926 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
vscsi->affinity_hint_set = true;
} else {
- for (i = 0; i < vscsi->num_queues; i++)
+ for (i = 0; i < vscsi->num_queues; i++) {
+ if (!vscsi->req_vqs[i].vq)
+ continue;
+
virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+ }
vscsi->affinity_hint_set = false;
}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8005f9869481..079e6b1b0cdb 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1115,8 +1115,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
atmel_spi_next_xfer_pio(master, xfer);
}
+ /* interrupts are disabled, so free the lock for schedule */
+ atmel_spi_unlock(as);
ret = wait_for_completion_timeout(&as->xfer_completion,
SPI_DMA_TIMEOUT);
+ atmel_spi_lock(as);
if (WARN_ON(ret == 0)) {
dev_err(&spi->dev,
"spi trasfer timeout, err %d\n", ret);
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 55e57c3eb9bd..ebf720b88a2a 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/ioport.h>
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 9009456bdf4d..c8e795ef2e13 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -244,9 +244,9 @@ static int hspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
- clk = clk_get(NULL, "shyway_clk");
+ clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "shyway_clk is required\n");
+ dev_err(&pdev->dev, "couldn't get clock\n");
ret = -EINVAL;
goto error0;
}
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 1a77ad52812f..67d8909dcf39 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -287,8 +287,8 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
sspi->left_rx_word)
sspi->rx_word(sspi);
- if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
- | SIRFSOC_SPI_TXFIFO_THD_REACH))
+ if (spi_stat & (SIRFSOC_SPI_TXFIFO_EMPTY |
+ SIRFSOC_SPI_TXFIFO_THD_REACH))
while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
& SIRFSOC_SPI_FIFO_FULL)) &&
sspi->left_tx_word)
@@ -470,7 +470,16 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
} else {
int gpio = sspi->chipselect[spi->chip_select];
- gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ gpio_direction_output(gpio,
+ spi->mode & SPI_CS_HIGH ? 1 : 0);
+ break;
+ case BITBANG_CS_INACTIVE:
+ gpio_direction_output(gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
+ break;
+ }
}
}
@@ -559,6 +568,11 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
regval &= ~SIRFSOC_SPI_CMD_MODE;
sspi->tx_by_cmd = false;
}
+ /*
+ * set spi controller in RISC chipselect mode, we are controlling CS by
+ * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
+ */
+ regval |= SIRFSOC_SPI_CS_IO_MODE;
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
if (IS_DMA_VALID(t)) {
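The chipselect change above drives the CS GPIO from two inputs: whether CS is being asserted and whether the device declares SPI_CS_HIGH. The resulting truth table as a runnable sketch; cs_gpio_level() is invented for the example.

#include <stdbool.h>
#include <stdio.h>

/* Level to drive on the CS GPIO for a given state and polarity. */
static int cs_gpio_level(bool asserted, bool cs_active_high)
{
        return asserted == cs_active_high;
}

int main(void)
{
        /* active-high device: assert -> 1, deassert -> 0 */
        printf("%d %d\n", cs_gpio_level(true, true), cs_gpio_level(false, true));
        /* active-low device:  assert -> 0, deassert -> 1 */
        printf("%d %d\n", cs_gpio_level(true, false), cs_gpio_level(false, false));
        return 0;
}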
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 71db683098d6..b59af0303581 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -493,7 +493,7 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
/* pointer to the DA */
*datap++ = val & 0xff;
*datap++ = (val >> 8) & 0xff;
- *datap++ = chan;
+ *datap++ = chan << 6;
devpriv->ao_readback[chan] = val;
s->async->events |= COMEDI_CB_BLOCK;
@@ -1040,11 +1040,8 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
- for (i = 0; i < cmd->chanlist_len; ++i) {
- unsigned int chan = CR_CHAN(cmd->chanlist[i]);
-
- devpriv->ao_chanlist[i] = chan << 6;
- }
+ for (i = 0; i < cmd->chanlist_len; ++i)
+ devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
/* we count in steps of 1ms (125us) */
/* 125us mode not used yet */
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 11fb95201545..dae8d1a9038e 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1526,7 +1526,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
struct resource *iores;
int ret = 0, touch_ret;
int i, s;
- unsigned int scale_uv;
+ uint64_t scale_uv;
/* Allocate the IIO device. */
iio = devm_iio_device_alloc(dev, sizeof(*lradc));
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index 36eedd8a0ea9..017d2f8379b7 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -70,6 +70,7 @@ static int ad2s1200_read_raw(struct iio_dev *indio_dev,
vel = (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4);
vel = (vel << 4) >> 4;
*val = vel;
+ break;
default:
mutex_unlock(&st->lock);
return -EINVAL;
@@ -106,7 +107,7 @@ static int ad2s1200_probe(struct spi_device *spi)
int pn, ret = 0;
unsigned short *pins = spi->dev.platform_data;
- for (pn = 0; pn < AD2S1200_PN; pn++)
+ for (pn = 0; pn < AD2S1200_PN; pn++) {
ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT,
DRV_NAME);
if (ret) {
@@ -114,6 +115,7 @@ static int ad2s1200_probe(struct spi_device *spi)
pins[pn]);
return ret;
}
+ }
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 94f9e3a38412..0ff7fda0742f 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index)
return hvc_driver;
}
-static int __init hvc_console_setup(struct console *co, char *options)
+static int hvc_console_setup(struct console *co, char *options)
{
if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
return -ENODEV;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 41fe8a047d37..fe9d129c8735 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2353,8 +2353,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
+ struct n_tty_data *ldata = tty->disc_data;
+
while (nr > 0) {
+ mutex_lock(&ldata->output_lock);
c = tty->ops->write(tty, b, nr);
+ mutex_unlock(&ldata->output_lock);
if (c < 0) {
retval = c;
goto break_out;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 81f909c2101f..2d4bd3929e50 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
*/
if ((p->port.type == PORT_XR17V35X) ||
(p->port.type == PORT_XR17D15X)) {
- serial_out(p, UART_EXAR_SLEEP, 0xff);
+ serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
return;
}
@@ -1520,7 +1520,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if (status & UART_LSR_THRE)
+ if (!up->dma && (status & UART_LSR_THRE))
serial8250_tx_chars(up);
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 7046769608d4..ab9096dc3849 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -20,12 +20,15 @@ static void __dma_tx_complete(void *param)
struct uart_8250_port *p = param;
struct uart_8250_dma *dma = p->dma;
struct circ_buf *xmit = &p->port.state->xmit;
-
- dma->tx_running = 0;
+ unsigned long flags;
dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
+ spin_lock_irqsave(&p->port.lock, flags);
+
+ dma->tx_running = 0;
+
xmit->tail += dma->tx_size;
xmit->tail &= UART_XMIT_SIZE - 1;
p->port.icount.tx += dma->tx_size;
@@ -35,6 +38,8 @@ static void __dma_tx_complete(void *param)
if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port))
serial8250_tx_dma(p);
+
+ spin_unlock_irqrestore(&p->port.lock, flags);
}
static void __dma_rx_complete(void *param)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 23f459600738..1f5505e7f90d 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1446,8 +1446,8 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
unsigned char c)
{
- unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
- unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+ unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
+ unsigned int ucon = rd_regl(port, S3C2410_UCON);
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
@@ -1455,7 +1455,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
- wr_regb(cons_uart, S3C2410_UTXH, c);
+ wr_regb(port, S3C2410_UTXH, c);
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -1463,22 +1463,23 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
static void
s3c24xx_serial_console_putchar(struct uart_port *port, int ch)
{
- unsigned int ufcon = rd_regl(cons_uart, S3C2410_UFCON);
- unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
-
- /* not possible to xmit on unconfigured port */
- if (!s3c24xx_port_configured(ucon))
- return;
+ unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
while (!s3c24xx_serial_console_txrdy(port, ufcon))
- barrier();
- wr_regb(cons_uart, S3C2410_UTXH, ch);
+ cpu_relax();
+ wr_regb(port, S3C2410_UTXH, ch);
}
static void
s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
+ unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+
+ /* not possible to xmit on unconfigured port */
+ if (!s3c24xx_port_configured(ucon))
+ return;
+
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f26834d262b3..b68550d95a40 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -137,6 +137,11 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
return 1;
/*
+ * Make sure the device is in D0 state.
+ */
+ uart_change_pm(state, UART_PM_STATE_ON);
+
+ /*
* Initialise and allocate the transmit and temporary
* buffer.
*/
@@ -825,25 +830,29 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
* If we fail to request resources for the
* new port, try to restore the old settings.
*/
- if (retval && old_type != PORT_UNKNOWN) {
+ if (retval) {
uport->iobase = old_iobase;
uport->type = old_type;
uport->hub6 = old_hub6;
uport->iotype = old_iotype;
uport->regshift = old_shift;
uport->mapbase = old_mapbase;
- retval = uport->ops->request_port(uport);
- /*
- * If we failed to restore the old settings,
- * we fail like this.
- */
- if (retval)
- uport->type = PORT_UNKNOWN;
- /*
- * We failed anyway.
- */
- retval = -EBUSY;
+ if (old_type != PORT_UNKNOWN) {
+ retval = uport->ops->request_port(uport);
+ /*
+ * If we failed to restore the old settings,
+ * we fail like this.
+ */
+ if (retval)
+ uport->type = PORT_UNKNOWN;
+
+ /*
+ * We failed anyway.
+ */
+ retval = -EBUSY;
+ }
+
/* Added to return the correct error -Ram Gupta */
goto exit;
}
@@ -1571,12 +1580,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
}
/*
- * Make sure the device is in D0 state.
- */
- if (port->count == 1)
- uart_change_pm(state, UART_PM_STATE_ON);
-
- /*
* Start up the serial port.
*/
retval = uart_startup(tty, state, 0);
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 8ebd9f88a6f6..cf78d1985cd8 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -258,7 +258,11 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
n->flags = flags;
buf->tail = n;
b->commit = b->used;
- smp_mb();
+ /* paired w/ barrier in flush_to_ldisc(); ensures the
+ * latest commit value can be read before the head is
+ * advanced to the next buffer
+ */
+ smp_wmb();
b->next = n;
} else if (change)
size = 0;
@@ -444,17 +448,24 @@ static void flush_to_ldisc(struct work_struct *work)
while (1) {
struct tty_buffer *head = buf->head;
+ struct tty_buffer *next;
int count;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
break;
+ next = head->next;
+ /* paired w/ barrier in __tty_buffer_request_room();
+ * ensures commit value read is not stale if the head
+ * is advancing to the next buffer
+ */
+ smp_rmb();
count = head->commit - head->read;
if (!count) {
- if (head->next == NULL)
+ if (next == NULL)
break;
- buf->head = head->next;
+ buf->head = next;
tty_buffer_free(port, head);
continue;
}
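The smp_wmb()/smp_rmb() pair above guarantees that a consumer which observes the new head->next also observes the commit value written before it. Below is a userspace analogue using C11 release/acquire ordering; the structure and function names are invented, and this models only the ordering, not the tty buffer code.

#include <stdatomic.h>
#include <stddef.h>

struct buf {
        int data[256];
        size_t commit;                  /* how much of data[] is valid */
        _Atomic(struct buf *) next;     /* published after commit */
};

/* Producer: make the contents visible, then advance the list. */
static void producer_publish(struct buf *cur, struct buf *new_buf)
{
        cur->commit = 256;
        atomic_store_explicit(&cur->next, new_buf, memory_order_release);
}

/* Consumer: read next first; the acquire guarantees the commit value
 * read afterwards is at least as new as the one published before next.
 */
static size_t consumer_peek(struct buf *cur, struct buf **nextp)
{
        *nextp = atomic_load_explicit(&cur->next, memory_order_acquire);
        return cur->commit;
}

int main(void)
{
        static struct buf a, b;
        struct buf *next;

        producer_publish(&a, &b);
        return consumer_peek(&a, &next) == 256 ? 0 : 1;
}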
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ca6831c5b763..1cd5d0ba587c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -277,6 +277,39 @@ static void hw_phymode_configure(struct ci_hdrc *ci)
}
/**
+ * ci_usb_phy_init: initialize phy according to different phy type
+ * @ci: the controller
+ *
+ * This function returns an error code if usb_phy_init has failed
+ */
+static int ci_usb_phy_init(struct ci_hdrc *ci)
+{
+ int ret;
+
+ switch (ci->platdata->phy_mode) {
+ case USBPHY_INTERFACE_MODE_UTMI:
+ case USBPHY_INTERFACE_MODE_UTMIW:
+ case USBPHY_INTERFACE_MODE_HSIC:
+ ret = usb_phy_init(ci->transceiver);
+ if (ret)
+ return ret;
+ hw_phymode_configure(ci);
+ break;
+ case USBPHY_INTERFACE_MODE_ULPI:
+ case USBPHY_INTERFACE_MODE_SERIAL:
+ hw_phymode_configure(ci);
+ ret = usb_phy_init(ci->transceiver);
+ if (ret)
+ return ret;
+ break;
+ default:
+ ret = usb_phy_init(ci->transceiver);
+ }
+
+ return ret;
+}
+
+/**
* hw_device_reset: resets chip (execute without interruption)
* @ci: the controller
*
@@ -543,8 +576,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
- hw_phymode_configure(ci);
-
if (ci->platdata->phy)
ci->transceiver = ci->platdata->phy;
else
@@ -564,7 +595,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- ret = usb_phy_init(ci->transceiver);
+ ret = ci_usb_phy_init(ci);
if (ret) {
dev_err(dev, "unable to init phy: %d\n", ret);
return ret;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index d001417e8e37..10aaaae9af25 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -821,6 +821,7 @@ static void dwc3_complete(struct device *dev)
spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_event_buffers_setup(dwc);
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
case USB_DR_MODE_OTG:
@@ -828,7 +829,6 @@ static void dwc3_complete(struct device *dev)
/* FALLTHROUGH */
case USB_DR_MODE_HOST:
default:
- dwc3_event_buffers_setup(dwc);
break;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index a740eac74d56..70715eeededd 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -187,15 +187,12 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
* improve this algorithm so that we better use the internal
* FIFO space
*/
- for (num = 0; num < DWC3_ENDPOINTS_NUM; num++) {
- struct dwc3_ep *dep = dwc->eps[num];
- int fifo_number = dep->number >> 1;
+ for (num = 0; num < dwc->num_in_eps; num++) {
+ /* bit0 indicates direction; 1 means IN ep */
+ struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
int mult = 1;
int tmp;
- if (!(dep->number & 1))
- continue;
-
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
@@ -224,8 +221,7 @@ int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
dep->name, last_fifo_depth, fifo_size & 0xffff);
- dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(fifo_number),
- fifo_size);
+ dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
last_fifo_depth += (fifo_size & 0xffff);
}
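
The rewritten loop relies on the endpoint-array layout noted in the added comment: bit 0 of the index encodes direction (1 = IN) and the remaining bits the endpoint/FIFO number, so indexing eps[(num << 1) | 1] while num runs over the IN endpoints visits exactly the TX FIFOs. A tiny stand-alone illustration of that mapping; ep_index() is an invented helper, not a dwc3 function.

#include <stdio.h>

/* Index into an eps[]-style array: bit 0 is the direction (1 = IN). */
static unsigned int ep_index(unsigned int num, int is_in)
{
	return (num << 1) | (is_in ? 1 : 0);
}

int main(void)
{
	for (unsigned int num = 0; num < 4; num++)
		printf("pair %u -> OUT eps[%u], IN eps[%u]\n",
		       num, ep_index(num, 0), ep_index(num, 1));
	return 0;
}
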
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index f605ad8c1902..cfd18bcca723 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (pdev->num_resources != 2) {
- DBG("invalid num_resources\n");
- return -ENODEV;
- }
- if ((pdev->resource[0].flags != IORESOURCE_MEM)
- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
- DBG("invalid resource type\n");
- return -ENODEV;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 2e164dca08e8..1e12b3ee56fd 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -745,6 +745,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
*/
struct usb_gadget *gadget = epfile->ffs->gadget;
+ spin_lock_irq(&epfile->ffs->eps_lock);
+ /* In the meantime, endpoint got disabled or changed. */
+ if (epfile->ep != ep) {
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ return -ESHUTDOWN;
+ }
/*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
@@ -752,6 +758,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
data_len = io_data->read ?
usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
io_data->len;
+ spin_unlock_irq(&epfile->ffs->eps_lock);
data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
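
The new eps_lock section is the usual "revalidate under the lock, then drop it before blocking" pattern: the endpoint pointer is re-checked while the lock is held, the packet-size-dependent length is computed while that pointer is still known to be valid, and only then is the lock released for the sleeping allocation. A user-space model of the same shape, using a pthread mutex instead of a spinlock; all names are invented for the sketch.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct sketch_ep { size_t maxpacket; };         /* maxpacket assumed non-zero */

struct sketch_epfile {
	pthread_mutex_t lock;                   /* models eps_lock */
	struct sketch_ep *ep;                   /* may be cleared concurrently */
};

static void *sketch_start_io(struct sketch_epfile *f, struct sketch_ep *expected,
			     size_t len)
{
	size_t aligned;

	pthread_mutex_lock(&f->lock);
	if (f->ep != expected) {                /* endpoint disabled or changed */
		pthread_mutex_unlock(&f->lock);
		errno = ESHUTDOWN;
		return NULL;
	}
	/* round up while the endpoint is still known to be valid */
	aligned = (len + expected->maxpacket - 1) / expected->maxpacket
		  * expected->maxpacket;
	pthread_mutex_unlock(&f->lock);

	return malloc(aligned);                 /* blocking work outside the lock */
}
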
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index c11761ce5113..9a4f49dc6ac4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -377,7 +377,7 @@ static struct sk_buff *rndis_add_header(struct gether *port,
if (skb2)
rndis_add_hdr(skb2);
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
return skb2;
}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 15960af0f67e..a2f26cdb56fe 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -1219,6 +1219,10 @@ static int fsl_pullup(struct usb_gadget *gadget, int is_on)
struct fsl_udc *udc;
udc = container_of(gadget, struct fsl_udc, gadget);
+
+ if (!udc->vbus_active)
+ return -EOPNOTSUPP;
+
udc->softconnect = (is_on != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -2532,8 +2536,8 @@ static int __exit fsl_udc_remove(struct platform_device *pdev)
if (!udc_controller)
return -ENODEV;
- usb_del_gadget_udc(&udc_controller->gadget);
udc_controller->done = &done;
+ usb_del_gadget_udc(&udc_controller->gadget);
fsl_udc_clk_release();
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index b5be6f0308c2..a925d0cbcd41 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -2043,6 +2043,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
return -ESRCH;
/* fake probe to determine $CHIP */
+ CHIP = NULL;
usb_gadget_probe_driver(&probe_driver);
if (!CHIP)
return -ENODEV;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index d822d822efb3..7ed452d90f4d 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -35,6 +35,7 @@
#include <asm/byteorder.h>
#include <asm/unaligned.h>
+#include "u_rndis.h"
#undef VERBOSE_DEBUG
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 50d09c289137..b7d4f82872b7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -48,8 +48,6 @@
#define UETH__VERSION "29-May-2008"
-#define GETHER_NAPI_WEIGHT 32
-
struct eth_dev {
/* lock is held while accessing port_usb
*/
@@ -74,7 +72,6 @@ struct eth_dev {
struct sk_buff_head *list);
struct work_struct work;
- struct napi_struct rx_napi;
unsigned long todo;
#define WORK_RX_MEMORY 0
@@ -256,16 +253,18 @@ enomem:
DBG(dev, "rx submit --> %d\n", retval);
if (skb)
dev_kfree_skb_any(skb);
+ spin_lock_irqsave(&dev->req_lock, flags);
+ list_add(&req->list, &dev->rx_reqs);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
}
return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
- struct sk_buff *skb = req->context;
+ struct sk_buff *skb = req->context, *skb2;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
- bool rx_queue = 0;
switch (status) {
@@ -289,8 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
- if (!status)
- rx_queue = 1;
+ skb = NULL;
+
+ skb2 = skb_dequeue(&dev->rx_frames);
+ while (skb2) {
+ if (status < 0
+ || ETH_HLEN > skb2->len
+ || skb2->len > VLAN_ETH_FRAME_LEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ DBG(dev, "rx length %d\n", skb2->len);
+ dev_kfree_skb_any(skb2);
+ goto next_frame;
+ }
+ skb2->protocol = eth_type_trans(skb2, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb2->len;
+
+ /* no buffer copies needed, unless hardware can't
+ * use skb buffers.
+ */
+ status = netif_rx(skb2);
+next_frame:
+ skb2 = skb_dequeue(&dev->rx_frames);
+ }
break;
/* software-driven interface shutdown */
@@ -313,20 +334,22 @@ quiesce:
/* FALLTHROUGH */
default:
- rx_queue = 1;
- dev_kfree_skb_any(skb);
dev->net->stats.rx_errors++;
DBG(dev, "rx status %d\n", status);
break;
}
+ if (skb)
+ dev_kfree_skb_any(skb);
+ if (!netif_running(dev->net)) {
clean:
spin_lock(&dev->req_lock);
list_add(&req->list, &dev->rx_reqs);
spin_unlock(&dev->req_lock);
-
- if (rx_queue && likely(napi_schedule_prep(&dev->rx_napi)))
- __napi_schedule(&dev->rx_napi);
+ req = NULL;
+ }
+ if (req)
+ rx_submit(dev, req, GFP_ATOMIC);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -391,24 +414,16 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
unsigned long flags;
- int rx_counts = 0;
/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
while (!list_empty(&dev->rx_reqs)) {
-
- if (++rx_counts > qlen(dev->gadget, dev->qmult))
- break;
-
req = container_of(dev->rx_reqs.next,
struct usb_request, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&dev->req_lock, flags);
if (rx_submit(dev, req, gfp_flags) < 0) {
- spin_lock_irqsave(&dev->req_lock, flags);
- list_add(&req->list, &dev->rx_reqs);
- spin_unlock_irqrestore(&dev->req_lock, flags);
defer_kevent(dev, WORK_RX_MEMORY);
return;
}
@@ -418,41 +433,6 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
spin_unlock_irqrestore(&dev->req_lock, flags);
}
-static int gether_poll(struct napi_struct *napi, int budget)
-{
- struct eth_dev *dev = container_of(napi, struct eth_dev, rx_napi);
- struct sk_buff *skb;
- unsigned int work_done = 0;
- int status = 0;
-
- while ((skb = skb_dequeue(&dev->rx_frames))) {
- if (status < 0
- || ETH_HLEN > skb->len
- || skb->len > VLAN_ETH_FRAME_LEN) {
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- DBG(dev, "rx length %d\n", skb->len);
- dev_kfree_skb_any(skb);
- continue;
- }
- skb->protocol = eth_type_trans(skb, dev->net);
- dev->net->stats.rx_packets++;
- dev->net->stats.rx_bytes += skb->len;
-
- status = netif_rx_ni(skb);
- }
-
- if (netif_running(dev->net)) {
- rx_fill(dev, GFP_KERNEL);
- work_done++;
- }
-
- if (work_done < budget)
- napi_complete(&dev->rx_napi);
-
- return work_done;
-}
-
static void eth_work(struct work_struct *work)
{
struct eth_dev *dev = container_of(work, struct eth_dev, work);
@@ -645,7 +625,6 @@ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
/* and open the tx floodgates */
atomic_set(&dev->tx_qlen, 0);
netif_wake_queue(dev->net);
- napi_enable(&dev->rx_napi);
}
static int eth_open(struct net_device *net)
@@ -672,7 +651,6 @@ static int eth_stop(struct net_device *net)
unsigned long flags;
VDBG(dev, "%s\n", __func__);
- napi_disable(&dev->rx_napi);
netif_stop_queue(net);
DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -790,7 +768,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
return ERR_PTR(-ENOMEM);
dev = netdev_priv(net);
- netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
@@ -853,7 +830,6 @@ struct net_device *gether_setup_name_default(const char *netname)
return ERR_PTR(-ENOMEM);
dev = netdev_priv(net);
- netif_napi_add(net, &dev->rx_napi, gether_poll, GETHER_NAPI_WEIGHT);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->req_lock);
INIT_WORK(&dev->work, eth_work);
@@ -1137,7 +1113,6 @@ void gether_disconnect(struct gether *link)
{
struct eth_dev *dev = link->ioport;
struct usb_request *req;
- struct sk_buff *skb;
WARN_ON(!dev);
if (!dev)
@@ -1164,12 +1139,6 @@ void gether_disconnect(struct gether *link)
spin_lock(&dev->req_lock);
}
spin_unlock(&dev->req_lock);
-
- spin_lock(&dev->rx_frames.lock);
- while ((skb = __skb_dequeue(&dev->rx_frames)))
- dev_kfree_skb_any(skb);
- spin_unlock(&dev->rx_frames.lock);
-
link->in_ep->driver_data = NULL;
link->in_ep->desc = NULL;
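
With the NAPI path removed, rx_complete() now validates and delivers every queued frame directly and then resubmits the request if the interface is still running. The drop-or-deliver loop reduces to the following stand-alone model; the frame queue and stats types are invented, and netif_rx() is only referenced in a comment.

#include <stddef.h>

#define SK_ETH_HLEN            14
#define SK_VLAN_ETH_FRAME_LEN  1522

struct sketch_frame { size_t len; struct sketch_frame *next; };
struct sketch_stats { unsigned long rx_packets, rx_errors, rx_length_errors; };

static struct sketch_frame *sketch_dequeue(struct sketch_frame **head)
{
	struct sketch_frame *f = *head;

	if (f)
		*head = f->next;
	return f;
}

static void sketch_drain(struct sketch_frame **queue, struct sketch_stats *st)
{
	struct sketch_frame *f;

	while ((f = sketch_dequeue(queue)) != NULL) {
		if (f->len < SK_ETH_HLEN || f->len > SK_VLAN_ETH_FRAME_LEN) {
			st->rx_errors++;
			st->rx_length_errors++;
			continue;       /* drop undersized/oversized frames */
		}
		st->rx_packets++;
		/* the driver hands the frame to the stack here via netif_rx() */
	}
}
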
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 9f170c53e3d9..134f354ede62 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -300,7 +300,7 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
ss_opts->isoc_interval = gzero_options.isoc_interval;
ss_opts->isoc_maxpacket = gzero_options.isoc_maxpacket;
ss_opts->isoc_mult = gzero_options.isoc_mult;
- ss_opts->isoc_maxburst = gzero_options.isoc_maxpacket;
+ ss_opts->isoc_maxburst = gzero_options.isoc_maxburst;
ss_opts->bulk_buflen = gzero_options.bulk_buflen;
func_ss = usb_get_function(func_inst_ss);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 6f2c8d3899d2..cf2734b532a7 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
break;
}
- if (pdata->have_sysif_regs && pdata->controller_ver &&
+ if (pdata->have_sysif_regs &&
+ pdata->controller_ver > FSL_USB_VER_1_6 &&
(phy_mode == FSL_USB2_PHY_ULPI)) {
/* check PHY_CLK_VALID to get phy clk valid */
if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index c81c8721cc5a..cd871b895013 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -90,6 +90,24 @@ __acquires(ohci->lock)
dl_done_list (ohci);
finish_unlinks (ohci, ohci_frame_no(ohci));
+ /*
+ * Some controllers don't handle "global" suspend properly if
+ * there are unsuspended ports. For these controllers, put all
+ * the enabled ports into suspend before suspending the root hub.
+ */
+ if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) {
+ __hc32 __iomem *portstat = ohci->regs->roothub.portstatus;
+ int i;
+ unsigned temp;
+
+ for (i = 0; i < ohci->num_ports; (++i, ++portstat)) {
+ temp = ohci_readl(ohci, portstat);
+ if ((temp & (RH_PS_PES | RH_PS_PSS)) ==
+ RH_PS_PES)
+ ohci_writel(ohci, RH_PS_PSS, portstat);
+ }
+ }
+
/* maybe resume can wake root hub */
if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) {
ohci->hc_control |= OHCI_CTRL_RWE;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 90879e9ccbec..bb1509675727 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
}
+ ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND;
return 0;
}
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 9250cada13f0..4550ce05af7f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -405,6 +405,8 @@ struct ohci_hcd {
#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
#define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
+#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
+
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
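
Taken together, the three OHCI hunks add a quirk bit, set it for the affected AMD parts, and make the root-hub suspend path suspend any port that is enabled but not yet suspended. A compact model of how the flag is consumed is shown below; the register I/O is faked with an array, and the bit values follow the RH_PS_* definitions in ohci.h.

#include <stdint.h>

#define SK_QUIRK_GLOBAL_SUSPEND	0x800		/* mirrors OHCI_QUIRK_GLOBAL_SUSPEND */
#define SK_RH_PS_PES		0x00000002	/* port enable status (ohci.h) */
#define SK_RH_PS_PSS		0x00000004	/* port suspend status (ohci.h) */

struct sketch_ohci {
	unsigned int flags;
	int num_ports;
	uint32_t portstatus[15];	/* stand-in for the root-hub registers */
};

static void sketch_suspend_ports(struct sketch_ohci *ohci)
{
	int i;

	if (!(ohci->flags & SK_QUIRK_GLOBAL_SUSPEND))
		return;

	for (i = 0; i < ohci->num_ports; i++) {
		/* enabled but not yet suspended -> request suspend */
		if ((ohci->portstatus[i] & (SK_RH_PS_PES | SK_RH_PS_PSS)) ==
		    SK_RH_PS_PES)
			ohci->portstatus[i] |= SK_RH_PS_PSS;
	}
}
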
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 47390e369cd4..35d447780707 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -134,6 +134,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
@@ -143,9 +145,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
- pdev->device == 0x0015 &&
- pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
- pdev->subsystem_device == 0xc0cd)
+ pdev->device == 0x0015)
xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5f926bea5ab1..7a0e3c720c00 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -550,6 +550,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_generic_trb *trb;
dma_addr_t addr;
+ u64 hw_dequeue;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
@@ -559,16 +560,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
stream_id);
return;
}
- state->new_cycle_state = 0;
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Finding segment containing stopped TRB.");
- state->new_deq_seg = find_trb_seg(cur_td->start_seg,
- dev->eps[ep_index].stopped_trb,
- &state->new_cycle_state);
- if (!state->new_deq_seg) {
- WARN_ON(1);
- return;
- }
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -577,46 +568,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (ep->ep_state & EP_HAS_STREAMS) {
struct xhci_stream_ctx *ctx =
&ep->stream_info->stream_ctx_array[stream_id];
- state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+ hw_dequeue = le64_to_cpu(ctx->stream_ring);
} else {
struct xhci_ep_ctx *ep_ctx
= xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+ hw_dequeue = le64_to_cpu(ep_ctx->deq);
}
+ /* Find virtual address and segment of hardware dequeue pointer */
+ state->new_deq_seg = ep_ring->deq_seg;
+ state->new_deq_ptr = ep_ring->dequeue;
+ while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+ != (dma_addr_t)(hw_dequeue & ~0xf)) {
+ next_trb(xhci, ep_ring, &state->new_deq_seg,
+ &state->new_deq_ptr);
+ if (state->new_deq_ptr == ep_ring->dequeue) {
+ WARN_ON(1);
+ return;
+ }
+ }
+ /*
+ * Find cycle state for last_trb, starting at old cycle state of
+ * hw_dequeue. If there is only one segment ring, find_trb_seg() will
+ * return immediately and cannot toggle the cycle state if this search
+ * wraps around, so add one more toggle manually in that case.
+ */
+ state->new_cycle_state = hw_dequeue & 0x1;
+ if (ep_ring->first_seg == ep_ring->first_seg->next &&
+ cur_td->last_trb < state->new_deq_ptr)
+ state->new_cycle_state ^= 0x1;
+
state->new_deq_ptr = cur_td->last_trb;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Finding segment containing last TRB in TD.");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
- state->new_deq_ptr,
- &state->new_cycle_state);
+ state->new_deq_ptr, &state->new_cycle_state);
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
+ /* Increment to find next TRB after last_trb. Cycle if appropriate. */
trb = &state->new_deq_ptr->generic;
if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
(trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
- /*
- * If there is only one segment in a ring, find_trb_seg()'s while loop
- * will not run, and it will return before it has a chance to see if it
- * needs to toggle the cycle bit. It can't tell if the stalled transfer
- * ended just before the link TRB on a one-segment ring, or if the TD
- * wrapped around the top of the ring, because it doesn't have the TD in
- * question. Look for the one-segment case where stalled TRB's address
- * is greater than the new dequeue pointer address.
- */
- if (ep_ring->first_seg == ep_ring->first_seg->next &&
- state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
- state->new_cycle_state ^= 0x1;
+ /* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Cycle state = 0x%x", state->new_cycle_state);
- /* Don't update the ring cycle state for the producer (us). */
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"New dequeue segment = %p (virtual)",
state->new_deq_seg);
@@ -799,7 +801,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
if (list_empty(&ep->cancelled_td_list)) {
xhci_stop_watchdog_timer_in_irq(xhci, ep);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
return;
}
@@ -867,11 +868,9 @@ remove_finished_td:
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
- /* Clear stopped_td and stopped_trb if endpoint is not halted */
- if (!(ep->ep_state & EP_HALTED)) {
+ /* Clear stopped_td if endpoint is not halted */
+ if (!(ep->ep_state & EP_HALTED))
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
- }
/*
* Drop the lock and complete the URBs in the cancelled TD list.
@@ -1941,14 +1940,12 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
ep->ep_state |= EP_HALTED;
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = stream_id;
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
ep->stopped_td = NULL;
- ep->stopped_trb = NULL;
ep->stopped_stream = 0;
xhci_ring_cmd_db(xhci);
@@ -2030,7 +2027,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
return 0;
} else {
if (trb_comp_code == COMP_STALL) {
@@ -2042,7 +2038,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* USB class driver clear the stall later.
*/
ep->stopped_td = td;
- ep->stopped_trb = event_trb;
ep->stopped_stream = ep_ring->stream_id;
} else if (xhci_requires_manual_halt_cleanup(xhci,
ep_ctx, trb_comp_code)) {
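
The xhci change replaces the stopped_trb bookkeeping with a direct search: start at the software dequeue position and walk TRB by TRB until the slot whose DMA address equals the controller's reported dequeue pointer (low-order bits masked off), taking the initial cycle state from bit 0 of that pointer. A simplified model of that walk over a segmented ring follows; the seg/pos types are invented, the real code walks struct xhci_segment, and the factor of 16 reflects the 16-byte TRB size.

#include <stdint.h>
#include <stdbool.h>

struct sketch_seg {
	uint64_t dma_base;		/* DMA address of the segment's first TRB */
	int num_trbs;
	struct sketch_seg *next;	/* segments form a ring */
};

struct sketch_pos { struct sketch_seg *seg; int idx; };

static void sketch_step(struct sketch_pos *p)
{
	if (++p->idx == p->seg->num_trbs) {	/* past the end: follow the ring */
		p->seg = p->seg->next;
		p->idx = 0;
	}
}

/* Walk from the software dequeue position until the TRB whose DMA address
 * matches the controller's dequeue pointer; the cycle state travels in bit 0. */
static bool sketch_find_hw_dequeue(struct sketch_pos start, uint64_t hw_dequeue,
				   struct sketch_pos *out, int *cycle)
{
	struct sketch_pos p = start;
	uint64_t target = hw_dequeue & ~0xfULL;

	*cycle = hw_dequeue & 0x1;
	do {
		if (p.seg->dma_base + 16ULL * p.idx == target) {
			*out = p;
			return true;
		}
		sketch_step(&p);
	} while (p.seg != start.seg || p.idx != start.idx);

	return false;			/* wrapped around without a match */
}
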
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8fe4e124ddd4..300836972faa 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -408,16 +408,16 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
#else
-static int xhci_try_enable_msi(struct usb_hcd *hcd)
+static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
return 0;
}
-static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}
-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}
@@ -2954,7 +2954,6 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
xhci_ring_cmd_db(xhci);
}
virt_ep->stopped_td = NULL;
- virt_ep->stopped_trb = NULL;
virt_ep->stopped_stream = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index d280e9213d08..4746816aed3e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -865,8 +865,6 @@ struct xhci_virt_ep {
#define EP_GETTING_NO_STREAMS (1 << 5)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
- /* The TRB that was last reported in a stopped endpoint ring */
- union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
unsigned int stopped_stream;
/* Watchdog timer for stop endpoint command to cancel URBs */
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 3372ded5def7..e2fd263585de 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -470,8 +470,9 @@ static int dsps_musb_exit(struct musb *musb)
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
del_timer_sync(&glue->timer);
-
usb_phy_shutdown(musb->xceiv);
+ debugfs_remove_recursive(glue->dbgfs_root);
+
return 0;
}
@@ -708,8 +709,6 @@ static int dsps_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- debugfs_remove_recursive(glue->dbgfs_root);
-
return 0;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index d341c149a2f9..d369bf1f3936 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -316,7 +316,13 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
{
struct omap2430_glue *glue = container_of(mailbox_work,
struct omap2430_glue, omap_musb_mailbox_work);
+ struct musb *musb = glue_to_musb(glue);
+ struct device *dev = musb->controller;
+
+ pm_runtime_get_sync(dev);
omap_musb_set_mailbox(glue);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -416,6 +422,7 @@ static int omap2430_musb_init(struct musb *musb)
omap_musb_set_mailbox(glue);
phy_init(musb->phy);
+ phy_power_on(musb->phy);
pm_runtime_put_noidle(musb->controller);
return 0;
@@ -478,6 +485,7 @@ static int omap2430_musb_exit(struct musb *musb)
del_timer_sync(&musb_idle_timer);
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
phy_exit(musb->phy);
return 0;
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
index d75196ad5f2f..35b6083b7999 100644
--- a/drivers/usb/phy/phy-am335x-control.c
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -3,6 +3,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include "am35x-phy-control.h"
struct am335x_control_usb {
@@ -86,6 +87,14 @@ static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
}
writel(val, usb_ctrl->phy_reg + reg);
+
+ /*
+ * Give the PHY ~1ms to complete the power up operation.
+ * Tests have shown unstable behaviour if other USB PHY related
+ * registers are written too shortly after such a transition.
+ */
+ if (on)
+ mdelay(1);
}
static const struct phy_control ctrl_am335x = {
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c47e5a6edde2..d03fadd2629f 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -303,17 +303,18 @@ int otg_statemachine(struct otg_fsm *fsm)
otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE);
break;
case OTG_STATE_A_WAIT_VRISE:
- if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld ||
- fsm->a_wait_vrise_tmout) {
+ if (fsm->a_vbus_vld)
otg_set_state(fsm, OTG_STATE_A_WAIT_BCON);
- }
+ else if (fsm->id || fsm->a_bus_drop ||
+ fsm->a_wait_vrise_tmout)
+ otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
break;
case OTG_STATE_A_WAIT_BCON:
if (!fsm->a_vbus_vld)
otg_set_state(fsm, OTG_STATE_A_VBUS_ERR);
else if (fsm->b_conn)
otg_set_state(fsm, OTG_STATE_A_HOST);
- else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout)
+ else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout)
otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL);
break;
case OTG_STATE_A_HOST:
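
The corrected A_WAIT_VRISE handling gives a valid VBUS priority over the abort conditions, and an abort now falls back to A_WAIT_VFALL instead of marching on to A_WAIT_BCON. Restated as a small pure function; the enum and struct below are illustrative, not the kernel's otg types.

enum sketch_otg_state { SK_A_WAIT_VRISE, SK_A_WAIT_BCON, SK_A_WAIT_VFALL };

struct sketch_otg_inputs {
	int id, a_bus_drop, a_vbus_vld, a_wait_vrise_tmout;
};

static enum sketch_otg_state
sketch_next_from_a_wait_vrise(const struct sketch_otg_inputs *in)
{
	if (in->a_vbus_vld)
		return SK_A_WAIT_BCON;		/* VBUS is up: proceed */
	if (in->id || in->a_bus_drop || in->a_wait_vrise_tmout)
		return SK_A_WAIT_VFALL;		/* abort: wait for VBUS to fall */
	return SK_A_WAIT_VRISE;			/* otherwise keep waiting */
}
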
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 8afa813d690b..36b6bce33b20 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -132,6 +132,9 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
+ if (!IS_ERR(phy))
+ phy = ERR_PTR(-ENODEV);
+
goto err0;
}
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index a2db5be9c305..df90dae53eb9 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -28,6 +28,7 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/serial.h>
+#include <linux/swab.h>
#include <linux/kfifo.h>
#include <linux/ioctl.h>
#include <linux/firmware.h>
@@ -280,7 +281,7 @@ static int read_download_mem(struct usb_device *dev, int start_address,
{
int status = 0;
__u8 read_length;
- __be16 be_start_address;
+ u16 be_start_address;
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
@@ -296,10 +297,14 @@ static int read_download_mem(struct usb_device *dev, int start_address,
if (read_length > 1) {
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
}
- be_start_address = cpu_to_be16(start_address);
+ /*
+ * NOTE: Must use swab as wIndex is sent in little-endian
+ * byte order regardless of host byte order.
+ */
+ be_start_address = swab16((u16)start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(__u16)address_type,
- (__force __u16)be_start_address,
+ be_start_address,
buffer, read_length);
if (status) {
@@ -394,7 +399,7 @@ static int write_i2c_mem(struct edgeport_serial *serial,
struct device *dev = &serial->serial->dev->dev;
int status = 0;
int write_length;
- __be16 be_start_address;
+ u16 be_start_address;
/* We can only send a maximum of 1 aligned byte page at a time */
@@ -409,11 +414,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
- /* Write first page */
- be_start_address = cpu_to_be16(start_address);
+ /*
+ * Write first page.
+ *
+ * NOTE: Must use swab as wIndex is sent in little-endian byte order
+ * regardless of host byte order.
+ */
+ be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, (__u16)address_type,
- (__force __u16)be_start_address,
+ be_start_address,
buffer, write_length);
if (status) {
dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
@@ -436,11 +446,16 @@ static int write_i2c_mem(struct edgeport_serial *serial,
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
- /* Write next page */
- be_start_address = cpu_to_be16(start_address);
+ /*
+ * Write next page.
+ *
+ * NOTE: Must use swab as wIndex is sent in little-endian byte
+ * order regardless of host byte order.
+ */
+ be_start_address = swab16((u16)start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(__u16)address_type,
- (__force __u16)be_start_address,
+ be_start_address,
buffer, write_length);
if (status) {
dev_err(dev, "%s - ERROR %d\n", __func__, status);
@@ -585,8 +600,8 @@ static int get_descriptor_addr(struct edgeport_serial *serial,
if (rom_desc->Type == desc_type)
return start_address;
- start_address = start_address + sizeof(struct ti_i2c_desc)
- + rom_desc->Size;
+ start_address = start_address + sizeof(struct ti_i2c_desc) +
+ le16_to_cpu(rom_desc->Size);
} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
@@ -599,7 +614,7 @@ static int valid_csum(struct ti_i2c_desc *rom_desc, __u8 *buffer)
__u16 i;
__u8 cs = 0;
- for (i = 0; i < rom_desc->Size; i++)
+ for (i = 0; i < le16_to_cpu(rom_desc->Size); i++)
cs = (__u8)(cs + buffer[i]);
if (cs != rom_desc->CheckSum) {
@@ -650,7 +665,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
break;
if ((start_address + sizeof(struct ti_i2c_desc) +
- rom_desc->Size) > TI_MAX_I2C_SIZE) {
+ le16_to_cpu(rom_desc->Size)) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
break;
@@ -665,7 +680,8 @@ static int check_i2c_image(struct edgeport_serial *serial)
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
- rom_desc->Size, buffer);
+ le16_to_cpu(rom_desc->Size),
+ buffer);
if (status)
break;
@@ -674,7 +690,7 @@ static int check_i2c_image(struct edgeport_serial *serial)
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
- rom_desc->Size;
+ le16_to_cpu(rom_desc->Size);
} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));
@@ -712,7 +728,7 @@ static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
- rom_desc->Size, buffer);
+ le16_to_cpu(rom_desc->Size), buffer);
if (status)
goto exit;
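
The swab16() change matters because the USB core serialises wIndex little-endian on every host: to place the address on the wire in big-endian byte order, the value handed to the core must be byte-swapped unconditionally, whereas cpu_to_be16() is a no-op on big-endian machines and would leave the bytes reversed there. A small host-side demonstration, with swab16() reimplemented so no kernel headers are needed.

#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	uint16_t addr = 0x1234;				/* address to send in wIndex */
	uint16_t windex = sketch_swab16(addr);		/* 0x3412 in host order */

	/* The USB core emits wIndex low byte first (little-endian). */
	uint8_t wire[2] = { windex & 0xff, windex >> 8 };

	printf("wire bytes: %02x %02x -> device reads 0x%02x%02x big-endian\n",
	       wire[0], wire[1], wire[0], wire[1]);
	return 0;
}
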
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 367c7f08b27c..f213ee978516 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,8 +234,31 @@ static void option_instat_callback(struct urb *urb);
#define QUALCOMM_VENDOR_ID 0x05C6
#define CMOTECH_VENDOR_ID 0x16d8
-#define CMOTECH_PRODUCT_6008 0x6008
-#define CMOTECH_PRODUCT_6280 0x6280
+#define CMOTECH_PRODUCT_6001 0x6001
+#define CMOTECH_PRODUCT_CMU_300 0x6002
+#define CMOTECH_PRODUCT_6003 0x6003
+#define CMOTECH_PRODUCT_6004 0x6004
+#define CMOTECH_PRODUCT_6005 0x6005
+#define CMOTECH_PRODUCT_CGU_628A 0x6006
+#define CMOTECH_PRODUCT_CHE_628S 0x6007
+#define CMOTECH_PRODUCT_CMU_301 0x6008
+#define CMOTECH_PRODUCT_CHU_628 0x6280
+#define CMOTECH_PRODUCT_CHU_628S 0x6281
+#define CMOTECH_PRODUCT_CDU_680 0x6803
+#define CMOTECH_PRODUCT_CDU_685A 0x6804
+#define CMOTECH_PRODUCT_CHU_720S 0x7001
+#define CMOTECH_PRODUCT_7002 0x7002
+#define CMOTECH_PRODUCT_CHU_629K 0x7003
+#define CMOTECH_PRODUCT_7004 0x7004
+#define CMOTECH_PRODUCT_7005 0x7005
+#define CMOTECH_PRODUCT_CGU_629 0x7006
+#define CMOTECH_PRODUCT_CHU_629S 0x700a
+#define CMOTECH_PRODUCT_CHU_720I 0x7211
+#define CMOTECH_PRODUCT_7212 0x7212
+#define CMOTECH_PRODUCT_7213 0x7213
+#define CMOTECH_PRODUCT_7251 0x7251
+#define CMOTECH_PRODUCT_7252 0x7252
+#define CMOTECH_PRODUCT_7253 0x7253
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
@@ -287,6 +310,7 @@ static void option_instat_callback(struct urb *urb);
#define ALCATEL_PRODUCT_X060S_X200 0x0000
#define ALCATEL_PRODUCT_X220_X500D 0x0017
#define ALCATEL_PRODUCT_L100V 0x011e
+#define ALCATEL_PRODUCT_L800MA 0x0203
#define PIRELLI_VENDOR_ID 0x1266
#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -349,6 +373,7 @@ static void option_instat_callback(struct urb *urb);
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
#define OLIVETTI_PRODUCT_OLICARD200 0xc005
+#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
@@ -502,6 +527,10 @@ static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
+static const struct option_blacklist_info net_intf0_blacklist = {
+ .reserved = BIT(0),
+};
+
static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};
@@ -1035,8 +1064,47 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
- { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
+ .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
+ .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1500,6 +1568,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
@@ -1545,6 +1615,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
.driver_info = (kernel_ulong_t)&net_intf6_blacklist
},
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist
+ },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 968a40201e5f..6c0a542e8ec1 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -136,12 +136,36 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 0)}, /* Sierra Wireless MC7710 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 2)}, /* Sierra Wireless MC7710 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68a2, 3)}, /* Sierra Wireless MC7710 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 0)}, /* Sierra Wireless MC73xx Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 2)}, /* Sierra Wireless MC73xx NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x68c0, 3)}, /* Sierra Wireless MC73xx Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 81fc0dfcfdcf..6d40d56378d7 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1347,10 +1347,12 @@ static int usb_serial_register(struct usb_serial_driver *driver)
static void usb_serial_deregister(struct usb_serial_driver *device)
{
pr_info("USB Serial deregistering driver %s\n", device->description);
+
mutex_lock(&table_lock);
list_del(&device->driver_list);
- usb_serial_bus_deregister(device);
mutex_unlock(&table_lock);
+
+ usb_serial_bus_deregister(device);
}
/**
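
The reordering above narrows table_lock to the list manipulation and performs the bus deregistration, which may block and interact with other locks, only after the mutex is dropped. The shape of the fix as a user-space model, using a pthread mutex and a circular doubly linked list; all names are invented for the sketch.

#include <pthread.h>

struct sketch_drv { struct sketch_drv *prev, *next; };	/* circular list node */

static pthread_mutex_t sketch_table_lock = PTHREAD_MUTEX_INITIALIZER;

static void sketch_bus_deregister(struct sketch_drv *d)
{
	(void)d;			/* may block; must not hold table_lock */
}

static void sketch_deregister(struct sketch_drv *d)
{
	pthread_mutex_lock(&sketch_table_lock);
	d->prev->next = d->next;	/* list_del() on a circular list */
	d->next->prev = d->prev;
	pthread_mutex_unlock(&sketch_table_lock);

	sketch_bus_deregister(d);	/* only after the lock is dropped */
}
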
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 4ef2a80728f7..008d805c3d21 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf,
us->transport_name = "Shuttle USBAT";
us->transport = usbat_flash_transport;
us->transport_reset = usb_stor_CB_reset;
- us->max_lun = 1;
+ us->max_lun = 0;
result = usb_stor_probe2(us);
return result;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f4a82291894a..174a447868cd 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Daniele Forsi <dforsi@gmail.com> */
+UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350,
+ "Nokia",
+ "5300",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
+/* Patch submitted by Victor A. Santos <victoraur.santos@gmail.com> */
+UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742,
+ "Nokia",
+ "305",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64),
+
/* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */
UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
"Nokia",
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 44741267c917..3f485df96226 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -301,7 +301,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
if (chid)
result = uwb_radio_start(&wusbhc->pal);
- else
+ else if (wusbhc->uwb_rc)
uwb_radio_stop(&wusbhc->pal);
return result;
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index c8e2a47d62a7..3e2e4ed20157 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -2390,10 +2390,10 @@ error_complete:
done) {
dev_info(dev, "Control EP stall. Queue delayed work.\n");
- spin_lock_irq(&wa->xfer_list_lock);
+ spin_lock(&wa->xfer_list_lock);
/* move xfer from xfer_list to xfer_errored_list. */
list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
- spin_unlock_irq(&wa->xfer_list_lock);
+ spin_unlock(&wa->xfer_list_lock);
spin_unlock_irqrestore(&xfer->lock, flags);
queue_work(wusbd, &wa->xfer_error_work);
} else {
diff --git a/drivers/uwb/drp.c b/drivers/uwb/drp.c
index 1a2fd9795367..468c89fb6a16 100644
--- a/drivers/uwb/drp.c
+++ b/drivers/uwb/drp.c
@@ -59,6 +59,7 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
struct uwb_rceb *reply, ssize_t reply_size)
{
struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
+ unsigned long flags;
if (r != NULL) {
if (r->bResultCode != UWB_RC_RES_SUCCESS)
@@ -67,14 +68,14 @@ static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
} else
dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");
- spin_lock_irq(&rc->rsvs_lock);
+ spin_lock_irqsave(&rc->rsvs_lock, flags);
if (rc->set_drp_ie_pending > 1) {
rc->set_drp_ie_pending = 0;
- uwb_rsv_queue_update(rc);
+ uwb_rsv_queue_update(rc);
} else {
- rc->set_drp_ie_pending = 0;
+ rc->set_drp_ie_pending = 0;
}
- spin_unlock_irq(&rc->rsvs_lock);
+ spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}
/**