-rw-r--r--  Documentation/ABI/testing/sysfs-bus-pci | 2
-rw-r--r--  Documentation/DocBook/media/Makefile | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/at91-clock.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/ti-edma.txt | 4
-rw-r--r--  Documentation/input/elantech.txt | 5
-rw-r--r--  Documentation/kernel-parameters.txt | 8
-rw-r--r--  Documentation/networking/filter.txt | 2
-rw-r--r--  Documentation/networking/packet_mmap.txt | 2
-rw-r--r--  MAINTAINERS | 40
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am3517.dtsi | 16
-rw-r--r--  arch/arm/boot/dts/am437x-gp-evm.dts | 5
-rw-r--r--  arch/arm/boot/dts/armada-370-db.dts | 1
-rw-r--r--  arch/arm/boot/dts/armada-375-db.dts | 5
-rw-r--r--  arch/arm/boot/dts/armada-xp-db.dts | 2
-rw-r--r--  arch/arm/boot/dts/armada-xp-gp.dts | 10
-rw-r--r--  arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts | 2
-rw-r--r--  arch/arm/boot/dts/at91-sama5d3_xplained.dts | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9261.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9rl.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/imx53-mba53.dts | 2
-rw-r--r--  arch/arm/boot/dts/imx53.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts | 18
-rw-r--r--  arch/arm/boot/dts/kirkwood-nsa310-common.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/kirkwood-t5325.dts | 5
-rw-r--r--  arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi | 19
-rw-r--r--  arch/arm/boot/dts/omap2.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/omap2420.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/omap2430.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/omap3-cm-t3x30.dtsi | 66
-rw-r--r--  arch/arm/boot/dts/omap3-igep.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap3-igep0020.dts | 4
-rw-r--r--  arch/arm/boot/dts/omap3-sb-t35.dtsi | 37
-rw-r--r--  arch/arm/boot/dts/omap3-sbc-t3517.dts | 13
-rw-r--r--  arch/arm/boot/dts/omap3.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/sama5d3.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d3_mci2.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d3_tcb1.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/sama5d3_uart.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/ste-ccu8540.dts | 1
-rw-r--r--  arch/arm/boot/dts/sun7i-a20.dtsi | 14
-rw-r--r--  arch/arm/common/edma.c | 48
-rw-r--r--  arch/arm/configs/sunxi_defconfig | 2
-rw-r--r--  arch/arm/include/asm/xen/page.h | 1
-rw-r--r--  arch/arm/mach-omap2/omap-headsmp.S | 8
-rw-r--r--  arch/arm/mach-orion5x/common.h | 2
-rw-r--r--  arch/arm64/include/asm/memory.h | 1
-rw-r--r--  arch/arm64/kernel/irq.c | 10
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 4
-rw-r--r--  arch/ia64/include/asm/unistd.h | 2
-rw-r--r--  arch/ia64/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/ia64/kernel/entry.S | 1
-rw-r--r--  arch/m68k/include/asm/unistd.h | 2
-rw-r--r--  arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/metag/include/asm/barrier.h | 3
-rw-r--r--  arch/metag/include/asm/processor.h | 2
-rw-r--r--  arch/metag/include/uapi/asm/Kbuild | 2
-rw-r--r--  arch/metag/include/uapi/asm/resource.h | 7
-rw-r--r--  arch/mips/dec/ecc-berr.c | 1
-rw-r--r--  arch/mips/dec/kn02xa-berr.c | 1
-rw-r--r--  arch/mips/dec/prom/Makefile | 1
-rw-r--r--  arch/mips/dec/prom/call_o32.S | 89
-rw-r--r--  arch/mips/fw/lib/call_o32.S | 57
-rw-r--r--  arch/mips/fw/sni/sniprom.c | 3
-rw-r--r--  arch/mips/include/asm/dec/prom.h | 48
-rw-r--r--  arch/mips/include/asm/rm9k-ocd.h | 56
-rw-r--r--  arch/mips/include/asm/syscall.h | 2
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 398
-rw-r--r--  arch/mips/include/uapi/asm/unistd.h | 9
-rw-r--r--  arch/mips/kernel/proc.c | 9
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-64.S | 1
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 1
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 1
-rw-r--r--  arch/mips/lantiq/dts/easy50712.dts | 1
-rw-r--r--  arch/mips/lib/csum_partial.S | 9
-rw-r--r--  arch/mips/lib/delay.c | 14
-rw-r--r--  arch/mips/lib/strncpy_user.S | 13
-rw-r--r--  arch/mips/loongson/Kconfig | 1
-rw-r--r--  arch/mips/loongson/lemote-2f/clock.c | 5
-rw-r--r--  arch/mips/mm/tlb-funcs.S | 4
-rw-r--r--  arch/mips/mm/tlbex.c | 7
-rw-r--r--  arch/mips/ralink/dts/mt7620a_eval.dts | 1
-rw-r--r--  arch/mips/ralink/dts/rt2880_eval.dts | 1
-rw-r--r--  arch/mips/ralink/dts/rt3052_eval.dts | 1
-rw-r--r--  arch/mips/ralink/dts/rt3883_eval.dts | 1
-rw-r--r--  arch/parisc/Kconfig | 1
-rw-r--r--  arch/parisc/include/asm/processor.h | 5
-rw-r--r--  arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 6
-rw-r--r--  arch/parisc/kernel/syscall.S | 12
-rw-r--r--  arch/parisc/kernel/syscall_table.S | 1
-rw-r--r--  arch/parisc/kernel/traps.c | 54
-rw-r--r--  arch/parisc/mm/fault.c | 44
-rw-r--r--  arch/powerpc/kernel/time.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 3
-rw-r--r--  arch/s390/crypto/aes_s390.c | 3
-rw-r--r--  arch/s390/crypto/des_s390.c | 3
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 89
-rw-r--r--  arch/sparc/include/asm/tsb.h | 3
-rw-r--r--  arch/sparc/kernel/head_64.S | 4
-rw-r--r--  arch/sparc/kernel/ktlb.S | 2
-rw-r--r--  arch/sparc/kernel/nmi.c | 21
-rw-r--r--  arch/sparc/kernel/smp_64.c | 6
-rw-r--r--  arch/sparc/kernel/sys32.S | 2
-rw-r--r--  arch/sparc/kernel/sysfs.c | 2
-rw-r--r--  arch/sparc/kernel/unaligned_64.c | 12
-rw-r--r--  arch/sparc/lib/NG2memcpy.S | 1
-rw-r--r--  arch/sparc/mm/fault_64.c | 98
-rw-r--r--  arch/sparc/mm/gup.c | 2
-rw-r--r--  arch/sparc/mm/init_64.c | 12
-rw-r--r--  arch/sparc/mm/tlb.c | 26
-rw-r--r--  arch/sparc/mm/tsb.c | 14
-rw-r--r--  arch/x86/Makefile | 1
-rw-r--r--  arch/x86/boot/Makefile | 4
-rw-r--r--  arch/x86/boot/compressed/misc.c | 2
-rw-r--r--  arch/x86/include/asm/hpet.h | 1
-rw-r--r--  arch/x86/include/asm/hugetlb.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 2
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 4
-rw-r--r--  arch/x86/kernel/cpu/mcheck/threshold.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/rdrand.c | 1
-rw-r--r--  arch/x86/kernel/early-quirks.c | 16
-rw-r--r--  arch/x86/kernel/head32.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/ldt.c | 4
-rw-r--r--  arch/x86/kernel/process_64.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 10
-rw-r--r--  arch/x86/kernel/smp.c | 2
-rw-r--r--  arch/x86/kernel/traps.c | 6
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 6
-rw-r--r--  arch/x86/kernel/vsyscall_gtod.c | 2
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/lguest/boot.c | 4
-rw-r--r--  arch/x86/lib/msr.c | 2
-rw-r--r--  arch/x86/math-emu/errors.c | 16
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/x86/platform/efi/early_printk.c | 83
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-pm.c | 2
-rw-r--r--  arch/x86/power/hibernate_64.c | 2
-rw-r--r--  arch/x86/vdso/vdso32-setup.c | 8
-rw-r--r--  arch/x86/xen/enlighten.c | 2
-rw-r--r--  arch/x86/xen/irq.c | 6
-rw-r--r--  block/blk-cgroup.c | 15
-rw-r--r--  drivers/Makefile | 2
-rw-r--r--  drivers/acpi/Kconfig | 17
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/ac.c | 117
-rw-r--r--  drivers/acpi/acpi_platform.c | 1
-rw-r--r--  drivers/acpi/acpi_processor.c | 1
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 4
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 7
-rw-r--r--  drivers/acpi/battery.c | 329
-rw-r--r--  drivers/acpi/blacklist.c | 21
-rw-r--r--  drivers/acpi/cm_sbs.c | 105
-rw-r--r--  drivers/acpi/video.c | 16
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 15
-rw-r--r--  drivers/ata/ahci.h | 1
-rw-r--r--  drivers/ata/ahci_imx.c | 179
-rw-r--r--  drivers/ata/libahci.c | 7
-rw-r--r--  drivers/ata/libata-core.c | 9
-rw-r--r--  drivers/bus/mvebu-mbus.c | 22
-rw-r--r--  drivers/char/agp/frontend.c | 1
-rw-r--r--  drivers/char/random.c | 7
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 8
-rw-r--r--  drivers/clk/bcm/clk-kona-setup.c | 33
-rw-r--r--  drivers/clk/bcm/clk-kona.c | 64
-rw-r--r--  drivers/clk/bcm/clk-kona.h | 28
-rw-r--r--  drivers/clk/clk-divider.c | 37
-rw-r--r--  drivers/clk/clk.c | 74
-rw-r--r--  drivers/clk/shmobile/clk-mstp.c | 9
-rw-r--r--  drivers/clk/socfpga/clk-pll.c | 7
-rw-r--r--  drivers/clk/socfpga/clk.c | 23
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 34
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 4
-rw-r--r--  drivers/crypto/caam/error.c | 10
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 1
-rw-r--r--  drivers/gpio/gpio-ich.c | 4
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 148
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 135
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 131
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 28
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v1_0.c | 10
-rw-r--r--  drivers/hid/hid-core.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-multitouch.c | 5
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 3
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hwmon/emc1403.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-nomadik.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 3
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 8
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 38
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 2
-rw-r--r--  drivers/input/keyboard/atkbd.c | 29
-rw-r--r--  drivers/input/keyboard/tca8418_keypad.c | 7
-rw-r--r--  drivers/input/misc/bma150.c | 4
-rw-r--r--  drivers/input/mouse/elantech.c | 26
-rw-r--r--  drivers/input/mouse/elantech.h | 1
-rw-r--r--  drivers/input/mouse/synaptics.c | 8
-rw-r--r--  drivers/iommu/amd_iommu.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 2
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 61
-rw-r--r--  drivers/md/dm-mpath.c | 2
-rw-r--r--  drivers/md/dm-thin.c | 23
-rw-r--r--  drivers/md/md.c | 3
-rw-r--r--  drivers/md/raid10.c | 13
-rw-r--r--  drivers/media/i2c/ov7670.c | 2
-rw-r--r--  drivers/media/i2c/s5c73m3/s5c73m3-core.c | 2
-rw-r--r--  drivers/media/media-device.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpbe_display.c | 16
-rw-r--r--  drivers/media/platform/davinci/vpfe_capture.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 34
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 35
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-core.c | 2
-rw-r--r--  drivers/media/tuners/fc2580.c | 6
-rw-r--r--  drivers/media/tuners/fc2580_priv.h | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/Makefile | 1
-rw-r--r--  drivers/media/usb/dvb-usb-v2/rtl28xxu.c | 48
-rw-r--r--  drivers/media/usb/gspca/sonixb.c | 2
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 12
-rw-r--r--  drivers/memory/mvebu-devbus.c | 15
-rw-r--r--  drivers/mfd/rtsx_pcr.c | 132
-rw-r--r--  drivers/mmc/host/rtsx_pci_sdmmc.c | 418
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 6
-rw-r--r--  drivers/net/bonding/bond_alb.c | 54
-rw-r--r--  drivers/net/bonding/bond_main.c | 134
-rw-r--r--  drivers/net/bonding/bond_options.c | 1
-rw-r--r--  drivers/net/bonding/bonding.h | 1
-rw-r--r--  drivers/net/can/c_can/Kconfig | 7
-rw-r--r--  drivers/net/can/c_can/c_can.c | 36
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 14
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 103
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 13
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 179
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 47
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 108
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 133
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 20
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/jme.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 54
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/macvlan.c | 18
-rw-r--r--  drivers/net/phy/phy.c | 16
-rw-r--r--  drivers/net/phy/phy_device.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 5
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/xen-netback/common.h | 2
-rw-r--r--  drivers/net/xen-netback/interface.c | 30
-rw-r--r--  drivers/net/xen-netback/netback.c | 102
-rw-r--r--  drivers/of/base.c | 4
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 92
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 4
-rw-r--r--  drivers/pci/pci.c | 5
-rw-r--r--  drivers/pnp/pnpbios/bioscalls.c | 2
-rw-r--r--  drivers/rtc/rtc-hym8563.c | 3
-rw-r--r--  drivers/rtc/rtc-pcf8523.c | 4
-rw-r--r--  drivers/sh/Makefile | 14
-rw-r--r--  drivers/sh/pm_runtime.c | 20
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 16
-rw-r--r--  drivers/spi/spi-qup.c | 2
-rw-r--r--  drivers/spi/spi.c | 124
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 7
-rw-r--r--  drivers/staging/imx-drm/imx-tve.c | 2
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 13
-rw-r--r--  drivers/staging/media/sn9c102/sn9c102_devtable.h | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/usb_ops_linux.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 28
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 1
-rw-r--r--  drivers/target/target_core_device.c | 12
-rw-r--r--  drivers/target/target_core_transport.c | 2
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 8
-rw-r--r--  drivers/xen/events/events_fifo.c | 41
-rw-r--r--  fs/affs/super.c | 2
-rw-r--r--  fs/autofs4/root.c | 4
-rw-r--r--  fs/btrfs/ioctl.c | 6
-rw-r--r--  fs/btrfs/send.c | 2
-rw-r--r--  fs/cifs/inode.c | 3
-rw-r--r--  fs/dcache.c | 318
-rw-r--r--  fs/exec.c | 6
-rw-r--r--  fs/fuse/control.c | 2
-rw-r--r--  fs/fuse/dir.c | 146
-rw-r--r--  fs/fuse/file.c | 84
-rw-r--r--  fs/fuse/fuse_i.h | 10
-rw-r--r--  fs/fuse/inode.c | 16
-rw-r--r--  fs/hugetlbfs/inode.c | 5
-rw-r--r--  fs/kernfs/file.c | 17
-rw-r--r--  fs/locks.c | 36
-rw-r--r--  fs/namei.c | 6
-rw-r--r--  fs/nfsd/nfs4acl.c | 17
-rw-r--r--  fs/nfsd/nfs4state.c | 25
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 8
-rw-r--r--  fs/posix_acl.c | 6
-rw-r--r--  fs/sysfs/file.c | 3
-rw-r--r--  fs/sysfs/mount.c | 3
-rw-r--r--  fs/xfs/xfs_attr.c | 24
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 21
-rw-r--r--  fs/xfs/xfs_attr_list.c | 1
-rw-r--r--  fs/xfs/xfs_attr_remote.c | 8
-rw-r--r--  fs/xfs/xfs_da_btree.h | 2
-rw-r--r--  fs/xfs/xfs_export.c | 2
-rw-r--r--  fs/xfs/xfs_file.c | 8
-rw-r--r--  fs/xfs/xfs_iops.c | 67
-rw-r--r--  fs/xfs/xfs_log.c | 10
-rw-r--r--  fs/xfs/xfs_mount.c | 2
-rw-r--r--  fs/xfs/xfs_qm.c | 26
-rw-r--r--  fs/xfs/xfs_sb.c | 4
-rw-r--r--  fs/xfs/xfs_super.c | 4
-rw-r--r--  include/asm-generic/resource.h | 2
-rw-r--r--  include/drm/drm_pciids.h | 16
-rw-r--r--  include/drm/i915_pciids.h | 4
-rw-r--r--  include/dt-bindings/clock/at91.h (renamed from include/dt-bindings/clk/at91.h) | 0
-rw-r--r--  include/linux/cgroup.h | 15
-rw-r--r--  include/linux/dcache.h | 2
-rw-r--r--  include/linux/hugetlb.h | 10
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/if_vlan.h | 15
-rw-r--r--  include/linux/interrupt.h | 5
-rw-r--r--  include/linux/kernfs.h | 19
-rw-r--r--  include/linux/linkage.h | 4
-rw-r--r--  include/linux/mfd/rtsx_common.h | 1
-rw-r--r--  include/linux/mfd/rtsx_pci.h | 6
-rw-r--r--  include/linux/mlx4/qp.h | 11
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/net.h | 15
-rw-r--r--  include/linux/netdevice.h | 27
-rw-r--r--  include/linux/of.h | 2
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/rtnetlink.h | 5
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/linux/slub_def.h | 9
-rw-r--r--  include/net/ip6_route.h | 1
-rw-r--r--  include/trace/events/module.h | 2
-rw-r--r--  include/uapi/asm-generic/resource.h | 7
-rw-r--r--  include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  include/uapi/linux/audit.h | 12
-rw-r--r--  include/uapi/linux/fuse.h | 22
-rw-r--r--  include/uapi/linux/nl80211.h | 4
-rw-r--r--  init/main.c | 2
-rw-r--r--  kernel/cgroup.c | 10
-rw-r--r--  kernel/cgroup_freezer.c | 116
-rw-r--r--  kernel/context_tracking.c | 2
-rw-r--r--  kernel/events/core.c | 174
-rw-r--r--  kernel/hrtimer.c | 8
-rw-r--r--  kernel/locking/lockdep.c | 2
-rw-r--r--  kernel/power/snapshot.c | 2
-rw-r--r--  kernel/printk/printk.c | 4
-rw-r--r--  kernel/sched/core.c | 25
-rw-r--r--  kernel/sched/cpudeadline.c | 4
-rw-r--r--  kernel/sched/cpupri.c | 3
-rw-r--r--  kernel/sched/cputime.c | 32
-rw-r--r--  kernel/sched/deadline.c | 5
-rw-r--r--  kernel/sched/fair.c | 16
-rw-r--r--  kernel/softirq.c | 4
-rw-r--r--  kernel/tracepoint.c | 4
-rw-r--r--  kernel/workqueue.c | 36
-rw-r--r--  lib/dump_stack.c | 4
-rw-r--r--  mm/Kconfig | 15
-rw-r--r--  mm/compaction.c | 22
-rw-r--r--  mm/filemap.c | 55
-rw-r--r--  mm/hugetlb.c | 19
-rw-r--r--  mm/kmemleak.c | 4
-rw-r--r--  mm/madvise.c | 2
-rw-r--r--  mm/memcontrol.c | 47
-rw-r--r--  mm/memory-failure.c | 17
-rw-r--r--  mm/mremap.c | 9
-rw-r--r--  mm/page-writeback.c | 6
-rw-r--r--  mm/percpu.c | 2
-rw-r--r--  mm/slab.c | 6
-rw-r--r--  mm/slab.h | 1
-rw-r--r--  mm/slab_common.c | 13
-rw-r--r--  mm/slub.c | 41
-rw-r--r--  mm/truncate.c | 8
-rw-r--r--  mm/util.c | 10
-rw-r--r--  mm/vmscan.c | 18
-rw-r--r--  net/8021q/vlan.c | 1
-rw-r--r--  net/8021q/vlan_dev.c | 52
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 2
-rw-r--r--  net/batman-adv/distributed-arp-table.c | 3
-rw-r--r--  net/batman-adv/fragmentation.c | 11
-rw-r--r--  net/batman-adv/gateway_client.c | 11
-rw-r--r--  net/batman-adv/hard-interface.c | 2
-rw-r--r--  net/batman-adv/originator.c | 62
-rw-r--r--  net/ceph/messenger.c | 20
-rw-r--r--  net/ceph/osdmap.c | 5
-rw-r--r--  net/core/dev.c | 80
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/core/net_namespace.c | 2
-rw-r--r--  net/core/rtnetlink.c | 33
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/utils.c | 8
-rw-r--r--  net/dsa/dsa.c | 3
-rw-r--r--  net/ipv4/ip_tunnel.c | 4
-rw-r--r--  net/ipv4/ip_vti.c | 5
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/xfrm4_output.c | 32
-rw-r--r--  net/ipv4/xfrm4_protocol.c | 19
-rw-r--r--  net/ipv6/ip6_offload.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ip6_vti.c | 8
-rw-r--r--  net/ipv6/ndisc.c | 7
-rw-r--r--  net/ipv6/route.c | 24
-rw-r--r--  net/ipv6/tcpv6_offload.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 22
-rw-r--r--  net/ipv6/xfrm6_protocol.c | 11
-rw-r--r--  net/iucv/af_iucv.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 1
-rw-r--r--  net/mac80211/mlme.c | 20
-rw-r--r--  net/mac80211/offchannel.c | 27
-rw-r--r--  net/mac80211/trace.h | 4
-rw-r--r--  net/mac80211/vht.c | 9
-rw-r--r--  net/netfilter/nf_tables_core.c | 49
-rw-r--r--  net/rxrpc/ar-key.c | 2
-rw-r--r--  net/sched/cls_tcindex.c | 30
-rwxr-xr-x  scripts/checksyscalls.sh | 5
-rw-r--r--  security/apparmor/include/apparmor.h | 1
-rw-r--r--  security/apparmor/lib.c | 14
-rw-r--r--  security/device_cgroup.c | 202
-rw-r--r--  sound/isa/sb/sb_mixer.c | 14
-rw-r--r--  sound/pci/hda/hda_intel.c | 6
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 4
-rw-r--r--  sound/soc/codecs/tlv320aic31xx.c | 2
-rw-r--r--  sound/soc/codecs/wm8962.c | 15
-rw-r--r--  sound/soc/codecs/wm8962.h | 4
-rw-r--r--  sound/soc/fsl/fsl_esai.c | 22
-rw-r--r--  sound/soc/fsl/imx-audmux.c | 2
-rw-r--r--  sound/soc/intel/sst-acpi.c | 1
-rw-r--r--  sound/soc/intel/sst-baytrail-dsp.c | 2
-rw-r--r--  sound/soc/intel/sst-baytrail-ipc.c | 8
-rw-r--r--  sound/soc/intel/sst-dsp-priv.h | 1
-rw-r--r--  sound/soc/intel/sst-dsp.c | 1
-rw-r--r--  sound/soc/intel/sst-dsp.h | 1
-rw-r--r--  sound/soc/intel/sst-firmware.c | 25
-rw-r--r--  sound/soc/intel/sst-haswell-dsp.c | 4
-rw-r--r--  sound/soc/intel/sst-haswell-ipc.c | 31
-rw-r--r--  sound/soc/intel/sst-haswell-ipc.h | 4
-rw-r--r--  sound/soc/intel/sst-haswell-pcm.c | 107
-rw-r--r--  sound/soc/sh/rcar/core.c | 5
-rw-r--r--  sound/soc/soc-dapm.c | 14
-rw-r--r--  sound/soc/soc-pcm.c | 2
-rw-r--r--  sound/usb/card.c | 12
-rw-r--r--  sound/usb/card.h | 1
-rw-r--r--  sound/usb/endpoint.c | 15
-rw-r--r--  sound/usb/pcm.c | 5
-rw-r--r--  sound/usb/usbaudio.h | 1
-rw-r--r--  tools/Makefile | 6
-rw-r--r--  tools/lib/lockdep/Makefile | 5
528 files changed, 6232 insertions, 3738 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index a3c5a6685036..ab8d76dfaa80 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -117,7 +117,7 @@ Description:
What: /sys/bus/pci/devices/.../vpd
Date: February 2008
-Contact: Ben Hutchings <bhutchings@solarflare.com>
+Contact: Ben Hutchings <bwh@kernel.org>
Description:
A file named vpd in a device directory will be a
binary file containing the Vital Product Data for the
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index f9fd615427fb..1d27f0a1abd1 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -195,7 +195,7 @@ DVB_DOCUMENTED = \
#
install_media_images = \
- $(Q)cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
+ $(Q)-cp $(OBJIMGFILES) $(MEDIA_SRC_DIR)/v4l/*.svg $(MEDIA_OBJ_DIR)/media_api
$(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
$(Q)base64 -d $< >$@
diff --git a/Documentation/devicetree/bindings/clock/at91-clock.txt b/Documentation/devicetree/bindings/clock/at91-clock.txt
index cd5e23912888..6794cdc96d8f 100644
--- a/Documentation/devicetree/bindings/clock/at91-clock.txt
+++ b/Documentation/devicetree/bindings/clock/at91-clock.txt
@@ -62,7 +62,7 @@ Required properties for PMC node:
- interrupt-controller : tell that the PMC is an interrupt controller.
- #interrupt-cells : must be set to 1. The first cell encodes the interrupt id,
and reflect the bit position in the PMC_ER/DR/SR registers.
- You can use the dt macros defined in dt-bindings/clk/at91.h.
+ You can use the dt macros defined in dt-bindings/clock/at91.h.
0 (AT91_PMC_MOSCS) -> main oscillator ready
1 (AT91_PMC_LOCKA) -> PLL A ready
2 (AT91_PMC_LOCKB) -> PLL B ready
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index 5992dceec7af..02a25d99ca61 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -43,7 +43,7 @@ Example
clock-output-names =
"tpu0", "mmcif1", "sdhi3", "sdhi2",
"sdhi1", "sdhi0", "mmcif0";
- renesas,clock-indices = <
+ clock-indices = <
R8A7790_CLK_TPU0 R8A7790_CLK_MMCIF1 R8A7790_CLK_SDHI3
R8A7790_CLK_SDHI2 R8A7790_CLK_SDHI1 R8A7790_CLK_SDHI0
R8A7790_CLK_MMCIF0
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt
index 9fbbdb783a72..68ff2137bae7 100644
--- a/Documentation/devicetree/bindings/dma/ti-edma.txt
+++ b/Documentation/devicetree/bindings/dma/ti-edma.txt
@@ -29,6 +29,6 @@ edma: edma@49000000 {
dma-channels = <64>;
ti,edma-regions = <4>;
ti,edma-slots = <256>;
- ti,edma-xbar-event-map = <1 12
- 2 13>;
+ ti,edma-xbar-event-map = /bits/ 16 <1 12
+ 2 13>;
};
diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
index 5602eb71ad5d..e1ae127ed099 100644
--- a/Documentation/input/elantech.txt
+++ b/Documentation/input/elantech.txt
@@ -504,9 +504,12 @@ byte 5:
* reg_10
bit 7 6 5 4 3 2 1 0
- 0 0 0 0 0 0 0 A
+ 0 0 0 0 R F T A
A: 1 = enable absolute tracking
+ T: 1 = enable two finger mode auto correct
+ F: 1 = disable ABS Position Filter
+ R: 1 = enable real hardware resolution
6.2 Native absolute mode 6 byte packet format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 43842177b771..30a8ad0dae53 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2218,10 +2218,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noreplace-smp [X86-32,SMP] Don't replace SMP instructions
with UP alternatives
- nordrand [X86] Disable the direct use of the RDRAND
- instruction even if it is supported by the
- processor. RDRAND is still available to user
- space applications.
+ nordrand [X86] Disable kernel use of the RDRAND and
+ RDSEED instructions even if they are supported
+ by the processor. RDRAND and RDSEED are still
+ available to user space applications.
noresume [SWSUSP] Disables resume and restores original swap
space.
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index dc7dcc8c8184..58c443926647 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -277,7 +277,7 @@ Possible BPF extensions are shown in the following table:
mark skb->mark
queue skb->queue_mapping
hatype skb->dev->type
- rxhash skb->rxhash
+ rxhash skb->hash
cpu raw_smp_processor_id()
vlan_tci vlan_tx_tag_get(skb)
vlan_pr vlan_tx_tag_present(skb)
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 6fea79efb4cb..38112d512f47 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -578,7 +578,7 @@ processes. This also works in combination with mmap(2) on packet sockets.
Currently implemented fanout policies are:
- - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+ - PACKET_FANOUT_HASH: schedule to socket by skb's packet hash
- PACKET_FANOUT_LB: schedule to socket by round-robin
- PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
- PACKET_FANOUT_RND: schedule to socket by random selection
diff --git a/MAINTAINERS b/MAINTAINERS
index f5de16eb1955..75106235a38d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -537,7 +537,7 @@ L: linux-alpha@vger.kernel.org
F: arch/alpha/
ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Vince Bridgers <vbridgers2013@gmail.com
+M: Vince Bridgers <vbridgers2013@gmail.com>
L: netdev@vger.kernel.org
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
@@ -1893,14 +1893,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
-BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Christian Daudt <bcm@fixthebug.org>
M: Matt Porter <mporter@linaro.org>
L: bcm-kernel-feedback-list@broadcom.com
-T: git git://git.github.com/broadcom/bcm11351
+T: git git://github.com/broadcom/mach-bcm
S: Maintained
F: arch/arm/mach-bcm/
F: arch/arm/boot/dts/bcm113*
+F: arch/arm/boot/dts/bcm216*
F: arch/arm/boot/dts/bcm281*
F: arch/arm/configs/bcm_defconfig
F: drivers/mmc/host/sdhci_bcm_kona.c
@@ -2250,12 +2251,6 @@ L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/host/ohci-ep93xx.c
-CIRRUS LOGIC CS4270 SOUND DRIVER
-M: Timur Tabi <timur@tabi.org>
-L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Odd Fixes
-F: sound/soc/codecs/cs4270*
-
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: Brian Austin <brian.austin@cirrus.com>
M: Paul Handrigan <Paul.Handrigan@cirrus.com>
@@ -4823,6 +4818,14 @@ L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
+
+IRQCHIP DRIVERS
+M: Thomas Gleixner <tglx@linutronix.de>
+M: Jason Cooper <jason@lakedaemon.net>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+T: git git://git.infradead.org/users/jcooper/linux.git irqchip/core
F: drivers/irqchip/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@ -5495,15 +5498,15 @@ F: Documentation/hwmon/ltc4261
F: drivers/hwmon/ltc4261.c
LTP (Linux Test Project)
-M: Shubham Goyal <shubham@linux.vnet.ibm.com>
M: Mike Frysinger <vapier@gentoo.org>
M: Cyril Hrubis <chrubis@suse.cz>
-M: Caspar Zhang <caspar@casparzhang.com>
M: Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M: Jan Stancek <jstancek@redhat.com>
+M: Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
+M: Alexey Kodanev <alexey.kodanev@oracle.com>
L: ltp-list@lists.sourceforge.net (subscribers-only)
-W: http://ltp.sourceforge.net/
+W: http://linux-test-project.github.io/
T: git git://github.com/linux-test-project/ltp.git
-T: git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
S: Maintained
M32R ARCHITECTURE
@@ -6516,10 +6519,10 @@ T: git git://openrisc.net/~jonas/linux
F: arch/openrisc/
OPENVSWITCH
-M: Jesse Gross <jesse@nicira.com>
+M: Pravin Shelar <pshelar@nicira.com>
L: dev@openvswitch.org
W: http://openvswitch.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
S: Maintained
F: net/openvswitch/
@@ -9112,6 +9115,9 @@ F: arch/um/os-Linux/drivers/
TURBOCHANNEL SUBSYSTEM
M: "Maciej W. Rozycki" <macro@linux-mips.org>
+M: Ralf Baechle <ralf@linux-mips.org>
+L: linux-mips@linux-mips.org
+Q: http://patchwork.linux-mips.org/project/linux-mips/list/
S: Maintained
F: drivers/tc/
F: include/linux/tc.h
@@ -9965,7 +9971,7 @@ F: drivers/net/hamradio/*scc.c
F: drivers/net/hamradio/z8530.h
ZBUD COMPRESSED PAGE ALLOCATOR
-M: Seth Jennings <sjenning@linux.vnet.ibm.com>
+M: Seth Jennings <sjennings@variantweb.net>
L: linux-mm@kvack.org
S: Maintained
F: mm/zbud.c
@@ -10010,7 +10016,7 @@ F: mm/zsmalloc.c
F: include/linux/zsmalloc.h
ZSWAP COMPRESSED SWAP CACHING
-M: Seth Jennings <sjenning@linux.vnet.ibm.com>
+M: Seth Jennings <sjennings@variantweb.net>
L: linux-mm@kvack.org
S: Maintained
F: mm/zswap.c
diff --git a/Makefile b/Makefile
index 28a7259e0f3b..9d993787afe0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index baf56cc92040..f1eea4af40ed 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -144,7 +144,7 @@
compatible = "ti,edma3";
ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
reg = <0x49000000 0x10000>,
- <0x44e10f90 0x10>;
+ <0x44e10f90 0x40>;
interrupts = <12 13 14>;
#dma-cells = <1>;
dma-channels = <64>;
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 788391f91684..5a452fdd7c5d 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -62,5 +62,21 @@
};
};
+&iva {
+ status = "disabled";
+};
+
+&mailbox {
+ status = "disabled";
+};
+
+&mmu_isp {
+ status = "disabled";
+};
+
+&smartreflex_mpu_iva {
+ status = "disabled";
+};
+
/include/ "am35xx-clocks.dtsi"
/include/ "omap36xx-am35xx-omap3430es2plus-clocks.dtsi"
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index df8798e8bd25..a055f7f0f14a 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -117,6 +117,11 @@
status = "okay";
};
+&gpio5 {
+ status = "okay";
+ ti,no-reset-on-init;
+};
+
&mmc1 {
status = "okay";
vmmc-supply = <&vmmcsd_fixed>;
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 82f238a9063f..3383c4b66803 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -67,6 +67,7 @@
i2c@11000 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
+ clock-frequency = <100000>;
status = "okay";
audio_codec: audio-codec@4a {
compatible = "cirrus,cs42l51";
diff --git a/arch/arm/boot/dts/armada-375-db.dts b/arch/arm/boot/dts/armada-375-db.dts
index 9378d3136b41..0451124e8ebf 100644
--- a/arch/arm/boot/dts/armada-375-db.dts
+++ b/arch/arm/boot/dts/armada-375-db.dts
@@ -79,6 +79,11 @@
};
};
+ sata@a0000 {
+ status = "okay";
+ nr-ports = <2>;
+ };
+
nand: nand@d0000 {
pinctrl-0 = <&nand_pins>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index 448373c4b0e5..90f0bf6f9271 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -49,7 +49,7 @@
/* Device Bus parameters are required */
/* Read parameters */
- devbus,bus-width = <8>;
+ devbus,bus-width = <16>;
devbus,turn-off-ps = <60000>;
devbus,badr-skew-ps = <0>;
devbus,acc-first-ps = <124000>;
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 61bda687f782..0c756421ae6a 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -59,7 +59,7 @@
/* Device Bus parameters are required */
/* Read parameters */
- devbus,bus-width = <8>;
+ devbus,bus-width = <16>;
devbus,turn-off-ps = <60000>;
devbus,badr-skew-ps = <0>;
devbus,acc-first-ps = <124000>;
@@ -146,22 +146,22 @@
ethernet@70000 {
status = "okay";
phy = <&phy0>;
- phy-mode = "rgmii-id";
+ phy-mode = "qsgmii";
};
ethernet@74000 {
status = "okay";
phy = <&phy1>;
- phy-mode = "rgmii-id";
+ phy-mode = "qsgmii";
};
ethernet@30000 {
status = "okay";
phy = <&phy2>;
- phy-mode = "rgmii-id";
+ phy-mode = "qsgmii";
};
ethernet@34000 {
status = "okay";
phy = <&phy3>;
- phy-mode = "rgmii-id";
+ phy-mode = "qsgmii";
};
/* Front-side USB slot */
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 985948ce67b3..5d42feb31049 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -39,7 +39,7 @@
/* Device Bus parameters are required */
/* Read parameters */
- devbus,bus-width = <8>;
+ devbus,bus-width = <16>;
devbus,turn-off-ps = <60000>;
devbus,badr-skew-ps = <0>;
devbus,acc-first-ps = <124000>;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index ce1375595e5f..4537259ce529 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -34,7 +34,7 @@
};
spi0: spi@f0004000 {
- cs-gpios = <&pioD 13 0>;
+ cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
status = "okay";
};
@@ -79,7 +79,7 @@
};
spi1: spi@f8008000 {
- cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+ cs-gpios = <&pioC 25 0>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index e21dda0e8986..3be973e9889a 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -10,7 +10,7 @@
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
/ {
model = "Atmel AT91SAM9261 family SoC";
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index 63e1784d272c..92a52faebef7 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -8,7 +8,7 @@
#include "skeleton.dtsi"
#include <dt-bindings/pinctrl/at91.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index 7c8c12969892..a3431d784870 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -244,7 +244,7 @@
&tve {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_vga_sync_1>;
- i2c-ddc-bus = <&i2c3>;
+ ddc-i2c-bus = <&i2c3>;
fsl,tve-mode = "vga";
fsl,hsync-pin = <4>;
fsl,vsync-pin = <6>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 9c2bff2252d0..6a1bf4ff83d5 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -115,7 +115,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx53-ipu";
- reg = <0x18000000 0x080000000>;
+ reg = <0x18000000 0x08000000>;
interrupts = <11 10>;
clocks = <&clks IMX5_CLK_IPU_GATE>,
<&clks IMX5_CLK_IPU_DI0_GATE>,
diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
index 32c6fb4a1162..b939f4f52d16 100644
--- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
+++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts
@@ -30,6 +30,16 @@
bootargs = "console=ttyS0,115200n8 earlyprintk";
};
+ mbus {
+ pcie-controller {
+ status = "okay";
+
+ pcie@1,0 {
+ status = "okay";
+ };
+ };
+ };
+
ocp@f1000000 {
pinctrl@10000 {
pmx_usb_led: pmx-usb-led {
@@ -73,14 +83,6 @@
ehci@50000 {
status = "okay";
};
-
- pcie-controller {
- status = "okay";
-
- pcie@1,0 {
- status = "okay";
- };
- };
};
gpio-leds {
diff --git a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
index aa78c2d11fe7..e2cc85cc3b87 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-nsa310-common.dtsi
@@ -4,6 +4,16 @@
/ {
model = "ZyXEL NSA310";
+ mbus {
+ pcie-controller {
+ status = "okay";
+
+ pcie@1,0 {
+ status = "okay";
+ };
+ };
+ };
+
ocp@f1000000 {
pinctrl: pinctrl@10000 {
@@ -26,14 +36,6 @@
status = "okay";
nr-ports = <2>;
};
-
- pcie-controller {
- status = "okay";
-
- pcie@1,0 {
- status = "okay";
- };
- };
};
gpio_poweroff {
diff --git a/arch/arm/boot/dts/kirkwood-t5325.dts b/arch/arm/boot/dts/kirkwood-t5325.dts
index 7d1c7677a18f..0bd70d928c69 100644
--- a/arch/arm/boot/dts/kirkwood-t5325.dts
+++ b/arch/arm/boot/dts/kirkwood-t5325.dts
@@ -127,11 +127,6 @@
i2c@11000 {
status = "okay";
-
- alc5621: alc5621@1a {
- compatible = "realtek,alc5621";
- reg = <0x1a>;
- };
};
serial@12000 {
diff --git a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
index f577b7df9a29..521c587acaee 100644
--- a/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
+++ b/arch/arm/boot/dts/omap-gpmc-smsc911x.dtsi
@@ -24,11 +24,10 @@
compatible = "smsc,lan9221", "smsc,lan9115";
bank-width = <2>;
gpmc,mux-add-data;
- gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <186>;
- gpmc,cs-wr-off-ns = <186>;
- gpmc,adv-on-ns = <12>;
- gpmc,adv-rd-off-ns = <48>;
+ gpmc,cs-on-ns = <1>;
+ gpmc,cs-rd-off-ns = <180>;
+ gpmc,cs-wr-off-ns = <180>;
+ gpmc,adv-rd-off-ns = <18>;
gpmc,adv-wr-off-ns = <48>;
gpmc,oe-on-ns = <54>;
gpmc,oe-off-ns = <168>;
@@ -36,12 +35,10 @@
gpmc,we-off-ns = <168>;
gpmc,rd-cycle-ns = <186>;
gpmc,wr-cycle-ns = <186>;
- gpmc,access-ns = <114>;
- gpmc,page-burst-access-ns = <6>;
- gpmc,bus-turnaround-ns = <12>;
- gpmc,cycle2cycle-delay-ns = <18>;
- gpmc,wr-data-mux-bus-ns = <90>;
- gpmc,wr-access-ns = <186>;
+ gpmc,access-ns = <144>;
+ gpmc,page-burst-access-ns = <24>;
+ gpmc,bus-turnaround-ns = <90>;
+ gpmc,cycle2cycle-delay-ns = <90>;
gpmc,cycle2cycle-samecsen;
gpmc,cycle2cycle-diffcsen;
vddvario-supply = <&vddvario>;
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index 22f35ea142c1..8f8c07da4ac1 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -71,13 +71,6 @@
interrupts = <58>;
};
- mailbox: mailbox@48094000 {
- compatible = "ti,omap2-mailbox";
- ti,hwmods = "mailbox";
- reg = <0x48094000 0x200>;
- interrupts = <26>;
- };
-
intc: interrupt-controller@1 {
compatible = "ti,omap2-intc";
interrupt-controller;
diff --git a/arch/arm/boot/dts/omap2420.dtsi b/arch/arm/boot/dts/omap2420.dtsi
index 85b1fb014c43..2d9979835f24 100644
--- a/arch/arm/boot/dts/omap2420.dtsi
+++ b/arch/arm/boot/dts/omap2420.dtsi
@@ -125,6 +125,14 @@
dma-names = "tx", "rx";
};
+ mailbox: mailbox@48094000 {
+ compatible = "ti,omap2-mailbox";
+ reg = <0x48094000 0x200>;
+ interrupts = <26>, <34>;
+ interrupt-names = "dsp", "iva";
+ ti,hwmods = "mailbox";
+ };
+
timer1: timer@48028000 {
compatible = "ti,omap2420-timer";
reg = <0x48028000 0x400>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index d09697dab55e..42d2c61c9e2d 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -216,6 +216,13 @@
dma-names = "tx", "rx";
};
+ mailbox: mailbox@48094000 {
+ compatible = "ti,omap2-mailbox";
+ reg = <0x48094000 0x200>;
+ interrupts = <26>;
+ ti,hwmods = "mailbox";
+ };
+
timer1: timer@49018000 {
compatible = "ti,omap2420-timer";
reg = <0x49018000 0x400>;
diff --git a/arch/arm/boot/dts/omap3-cm-t3x30.dtsi b/arch/arm/boot/dts/omap3-cm-t3x30.dtsi
index d00055809e31..25ba08331d88 100644
--- a/arch/arm/boot/dts/omap3-cm-t3x30.dtsi
+++ b/arch/arm/boot/dts/omap3-cm-t3x30.dtsi
@@ -10,18 +10,6 @@
cpu0-supply = <&vcc>;
};
};
-
- vddvario: regulator-vddvario {
- compatible = "regulator-fixed";
- regulator-name = "vddvario";
- regulator-always-on;
- };
-
- vdd33a: regulator-vdd33a {
- compatible = "regulator-fixed";
- regulator-name = "vdd33a";
- regulator-always-on;
- };
};
&omap3_pmx_core {
@@ -35,58 +23,34 @@
hsusb0_pins: pinmux_hsusb0_pins {
pinctrl-single,pins = <
- OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* hsusb0_clk.hsusb0_clk */
- OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0) /* hsusb0_stp.hsusb0_stp */
- OMAP3_CORE1_IOPAD(0x21a4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_dir.hsusb0_dir */
- OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_nxt.hsusb0_nxt */
- OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data0.hsusb2_data0 */
- OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data1.hsusb0_data1 */
- OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data2.hsusb0_data2 */
- OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data3 */
- OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data4 */
- OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data5 */
- OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data6 */
- OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
+ OMAP3_CORE1_IOPAD(0x21a2, PIN_OUTPUT | MUX_MODE0) /* hsusb0_clk.hsusb0_clk */
+ OMAP3_CORE1_IOPAD(0x21a4, PIN_OUTPUT | MUX_MODE0) /* hsusb0_stp.hsusb0_stp */
+ OMAP3_CORE1_IOPAD(0x21a6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_dir.hsusb0_dir */
+ OMAP3_CORE1_IOPAD(0x21a8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_nxt.hsusb0_nxt */
+ OMAP3_CORE1_IOPAD(0x21aa, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data0.hsusb2_data0 */
+ OMAP3_CORE1_IOPAD(0x21ac, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data1.hsusb0_data1 */
+ OMAP3_CORE1_IOPAD(0x21ae, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data2.hsusb0_data2 */
+ OMAP3_CORE1_IOPAD(0x21b0, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data3 */
+ OMAP3_CORE1_IOPAD(0x21b2, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data4 */
+ OMAP3_CORE1_IOPAD(0x21b4, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data5 */
+ OMAP3_CORE1_IOPAD(0x21b6, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data6 */
+ OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT_PULLDOWN | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
>;
};
};
+#include "omap-gpmc-smsc911x.dtsi"
+
&gpmc {
ranges = <5 0 0x2c000000 0x01000000>;
- smsc1: ethernet@5,0 {
+ smsc1: ethernet@gpmc {
compatible = "smsc,lan9221", "smsc,lan9115";
pinctrl-names = "default";
pinctrl-0 = <&smsc1_pins>;
interrupt-parent = <&gpio6>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
reg = <5 0 0xff>;
- bank-width = <2>;
- gpmc,mux-add-data;
- gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <186>;
- gpmc,cs-wr-off-ns = <186>;
- gpmc,adv-on-ns = <12>;
- gpmc,adv-rd-off-ns = <48>;
- gpmc,adv-wr-off-ns = <48>;
- gpmc,oe-on-ns = <54>;
- gpmc,oe-off-ns = <168>;
- gpmc,we-on-ns = <54>;
- gpmc,we-off-ns = <168>;
- gpmc,rd-cycle-ns = <186>;
- gpmc,wr-cycle-ns = <186>;
- gpmc,access-ns = <114>;
- gpmc,page-burst-access-ns = <6>;
- gpmc,bus-turnaround-ns = <12>;
- gpmc,cycle2cycle-delay-ns = <18>;
- gpmc,wr-data-mux-bus-ns = <90>;
- gpmc,wr-access-ns = <186>;
- gpmc,cycle2cycle-samecsen;
- gpmc,cycle2cycle-diffcsen;
- vddvario-supply = <&vddvario>;
- vdd33a-supply = <&vdd33a>;
- reg-io-width = <4>;
- smsc,save-mac-address;
};
};
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi
index b97736d98a64..e2d163bf0619 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
@@ -107,7 +107,7 @@
>;
};
- smsc911x_pins: pinmux_smsc911x_pins {
+ smsc9221_pins: pinmux_smsc9221_pins {
pinctrl-single,pins = <
0x1a2 (PIN_INPUT | MUX_MODE4) /* mcspi1_cs2.gpio_176 */
>;
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 7abd64f6ae21..b22caaaf774b 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -10,7 +10,7 @@
*/
#include "omap3-igep.dtsi"
-#include "omap-gpmc-smsc911x.dtsi"
+#include "omap-gpmc-smsc9221.dtsi"
/ {
model = "IGEPv2 (TI OMAP AM/DM37x)";
@@ -248,7 +248,7 @@
ethernet@gpmc {
pinctrl-names = "default";
- pinctrl-0 = <&smsc911x_pins>;
+ pinctrl-0 = <&smsc9221_pins>;
reg = <5 0 0xff>;
interrupt-parent = <&gpio6>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/omap3-sb-t35.dtsi b/arch/arm/boot/dts/omap3-sb-t35.dtsi
index 7909c51b05a5..d59e3de1441e 100644
--- a/arch/arm/boot/dts/omap3-sb-t35.dtsi
+++ b/arch/arm/boot/dts/omap3-sb-t35.dtsi
@@ -2,20 +2,6 @@
* Common support for CompuLab SB-T35 used on SBC-T3530, SBC-T3517 and SBC-T3730
*/
-/ {
- vddvario_sb_t35: regulator-vddvario-sb-t35 {
- compatible = "regulator-fixed";
- regulator-name = "vddvario";
- regulator-always-on;
- };
-
- vdd33a_sb_t35: regulator-vdd33a-sb-t35 {
- compatible = "regulator-fixed";
- regulator-name = "vdd33a";
- regulator-always-on;
- };
-};
-
&omap3_pmx_core {
smsc2_pins: pinmux_smsc2_pins {
pinctrl-single,pins = <
@@ -37,11 +23,10 @@
reg = <4 0 0xff>;
bank-width = <2>;
gpmc,mux-add-data;
- gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <186>;
- gpmc,cs-wr-off-ns = <186>;
- gpmc,adv-on-ns = <12>;
- gpmc,adv-rd-off-ns = <48>;
+ gpmc,cs-on-ns = <1>;
+ gpmc,cs-rd-off-ns = <180>;
+ gpmc,cs-wr-off-ns = <180>;
+ gpmc,adv-rd-off-ns = <18>;
gpmc,adv-wr-off-ns = <48>;
gpmc,oe-on-ns = <54>;
gpmc,oe-off-ns = <168>;
@@ -49,16 +34,14 @@
gpmc,we-off-ns = <168>;
gpmc,rd-cycle-ns = <186>;
gpmc,wr-cycle-ns = <186>;
- gpmc,access-ns = <114>;
- gpmc,page-burst-access-ns = <6>;
- gpmc,bus-turnaround-ns = <12>;
- gpmc,cycle2cycle-delay-ns = <18>;
- gpmc,wr-data-mux-bus-ns = <90>;
- gpmc,wr-access-ns = <186>;
+ gpmc,access-ns = <144>;
+ gpmc,page-burst-access-ns = <24>;
+ gpmc,bus-turnaround-ns = <90>;
+ gpmc,cycle2cycle-delay-ns = <90>;
gpmc,cycle2cycle-samecsen;
gpmc,cycle2cycle-diffcsen;
- vddvario-supply = <&vddvario_sb_t35>;
- vdd33a-supply = <&vdd33a_sb_t35>;
+ vddvario-supply = <&vddvario>;
+ vdd33a-supply = <&vdd33a>;
reg-io-width = <4>;
smsc,save-mac-address;
};
diff --git a/arch/arm/boot/dts/omap3-sbc-t3517.dts b/arch/arm/boot/dts/omap3-sbc-t3517.dts
index 024c9c6c682d..42189b65d393 100644
--- a/arch/arm/boot/dts/omap3-sbc-t3517.dts
+++ b/arch/arm/boot/dts/omap3-sbc-t3517.dts
@@ -8,6 +8,19 @@
/ {
model = "CompuLab SBC-T3517 with CM-T3517";
compatible = "compulab,omap3-sbc-t3517", "compulab,omap3-cm-t3517", "ti,am3517", "ti,omap3";
+
+ /* Only one GPMC smsc9220 on SBC-T3517, CM-T3517 uses am35x Ethernet */
+ vddvario: regulator-vddvario-sb-t35 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddvario";
+ regulator-always-on;
+ };
+
+ vdd33a: regulator-vdd33a-sb-t35 {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd33a";
+ regulator-always-on;
+ };
};
&omap3_pmx_core {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index acb9019dc437..4231191ade06 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -61,7 +61,7 @@
ti,hwmods = "mpu";
};
- iva {
+ iva: iva {
compatible = "ti,iva2.2";
ti,hwmods = "iva";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index f8c9855ce587..36b4312a5e0d 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -630,6 +630,13 @@
status = "disabled";
};
+ mailbox: mailbox@4a0f4000 {
+ compatible = "ti,omap4-mailbox";
+ reg = <0x4a0f4000 0x200>;
+ interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ ti,hwmods = "mailbox";
+ };
+
timer1: timer@4ae18000 {
compatible = "ti,omap5430-timer";
reg = <0x4ae18000 0x80>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index eabcfdbb403a..a106b0872910 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -13,7 +13,7 @@
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
/ {
model = "Atmel SAMA5D3 family SoC";
diff --git a/arch/arm/boot/dts/sama5d3_mci2.dtsi b/arch/arm/boot/dts/sama5d3_mci2.dtsi
index b029fe7ef17a..1b02208ea6ff 100644
--- a/arch/arm/boot/dts/sama5d3_mci2.dtsi
+++ b/arch/arm/boot/dts/sama5d3_mci2.dtsi
@@ -9,7 +9,7 @@
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
/ {
ahb {
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
index 382b04431f66..02848453ca0c 100644
--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -9,7 +9,7 @@
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
/ {
aliases {
diff --git a/arch/arm/boot/dts/sama5d3_uart.dtsi b/arch/arm/boot/dts/sama5d3_uart.dtsi
index a9fa75e41652..7a8d4c6115f7 100644
--- a/arch/arm/boot/dts/sama5d3_uart.dtsi
+++ b/arch/arm/boot/dts/sama5d3_uart.dtsi
@@ -9,7 +9,7 @@
#include <dt-bindings/pinctrl/at91.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include <dt-bindings/clk/at91.h>
+#include <dt-bindings/clock/at91.h>
/ {
aliases {
diff --git a/arch/arm/boot/dts/ste-ccu8540.dts b/arch/arm/boot/dts/ste-ccu8540.dts
index 7f3baf51a3a9..32dd55e5f4e6 100644
--- a/arch/arm/boot/dts/ste-ccu8540.dts
+++ b/arch/arm/boot/dts/ste-ccu8540.dts
@@ -18,6 +18,7 @@
compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
memory@0 {
+ device_type = "memory";
reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 32efc105df83..aba1c8a3f388 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -87,7 +87,7 @@
pll4: clk@01c20018 {
#clock-cells = <0>;
- compatible = "allwinner,sun4i-a10-pll1-clk";
+ compatible = "allwinner,sun7i-a20-pll4-clk";
reg = <0x01c20018 0x4>;
clocks = <&osc24M>;
clock-output-names = "pll4";
@@ -109,6 +109,14 @@
clock-output-names = "pll6_sata", "pll6_other", "pll6";
};
+ pll8: clk@01c20040 {
+ #clock-cells = <0>;
+ compatible = "allwinner,sun7i-a20-pll4-clk";
+ reg = <0x01c20040 0x4>;
+ clocks = <&osc24M>;
+ clock-output-names = "pll8";
+ };
+
cpu: cpu@01c20054 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-cpu-clk";
@@ -805,9 +813,9 @@
status = "disabled";
};
- i2c4: i2c@01c2bc00 {
+ i2c4: i2c@01c2c000 {
compatible = "allwinner,sun4i-i2c";
- reg = <0x01c2bc00 0x400>;
+ reg = <0x01c2c000 0x400>;
interrupts = <0 89 4>;
clocks = <&apb1_gates 15>;
clock-frequency = <100000>;
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 41bca32409fc..5339009b3c0c 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1423,55 +1423,38 @@ EXPORT_SYMBOL(edma_clear_event);
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DMADEVICES)
-static int edma_of_read_u32_to_s16_array(const struct device_node *np,
- const char *propname, s16 *out_values,
- size_t sz)
+static int edma_xbar_event_map(struct device *dev, struct device_node *node,
+ struct edma_soc_info *pdata, size_t sz)
{
- int ret;
-
- ret = of_property_read_u16_array(np, propname, out_values, sz);
- if (ret)
- return ret;
-
- /* Terminate it */
- *out_values++ = -1;
- *out_values++ = -1;
-
- return 0;
-}
-
-static int edma_xbar_event_map(struct device *dev,
- struct device_node *node,
- struct edma_soc_info *pdata, int len)
-{
- int ret, i;
+ const char pname[] = "ti,edma-xbar-event-map";
struct resource res;
void __iomem *xbar;
- const s16 (*xbar_chans)[2];
+ s16 (*xbar_chans)[2];
+ size_t nelm = sz / sizeof(s16);
u32 shift, offset, mux;
+ int ret, i;
- xbar_chans = devm_kzalloc(dev,
- len/sizeof(s16) + 2*sizeof(s16),
- GFP_KERNEL);
+ xbar_chans = devm_kzalloc(dev, (nelm + 2) * sizeof(s16), GFP_KERNEL);
if (!xbar_chans)
return -ENOMEM;
ret = of_address_to_resource(node, 1, &res);
if (ret)
- return -EIO;
+ return -ENOMEM;
xbar = devm_ioremap(dev, res.start, resource_size(&res));
if (!xbar)
return -ENOMEM;
- ret = edma_of_read_u32_to_s16_array(node,
- "ti,edma-xbar-event-map",
- (s16 *)xbar_chans,
- len/sizeof(u32));
+ ret = of_property_read_u16_array(node, pname, (u16 *)xbar_chans, nelm);
if (ret)
return -EIO;
- for (i = 0; xbar_chans[i][0] != -1; i++) {
+ /* Invalidate last entry for the other user of this mess */
+ nelm >>= 1;
+ xbar_chans[nelm][0] = xbar_chans[nelm][1] = -1;
+
+ for (i = 0; i < nelm; i++) {
shift = (xbar_chans[i][1] & 0x03) << 3;
offset = xbar_chans[i][1] & 0xfffffffc;
mux = readl(xbar + offset);
@@ -1480,8 +1463,7 @@ static int edma_xbar_event_map(struct device *dev,
writel(mux, (xbar + offset));
}
- pdata->xbar_chans = xbar_chans;
-
+ pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
return 0;
}
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index b5df4a511b0a..81ba78eaf54a 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -37,7 +37,7 @@ CONFIG_SUN4I_EMAC=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
+CONFIG_STMMAC_ETH=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_WLAN is not set
CONFIG_SERIAL_8250=y
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index cf4f3e867395..ded062f9b358 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -77,7 +77,6 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
}
/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
-#define virt_to_pfn(v) (PFN_DOWN(__pa(v)))
#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 75e92952c18e..40c5d5f1451c 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -1,7 +1,7 @@
/*
* Secondary CPU startup routine source file.
*
- * Copyright (C) 2009 Texas Instruments, Inc.
+ * Copyright (C) 2009-2014 Texas Instruments, Inc.
*
* Author:
* Santosh Shilimkar <santosh.shilimkar@ti.com>
@@ -28,9 +28,13 @@
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
* The primary core will update this flag using a hardware
-+ * register AuxCoreBoot0.
+ * register AuxCoreBoot0.
*/
ENTRY(omap5_secondary_startup)
+.arm
+THUMB( adr r9, BSYM(wait) ) @ CPU may be entered in ARM mode.
+THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
+THUMB( .thumb ) @ switch to Thumb now.
wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
ldr r0, [r2]
mov r0, r0, lsr #5
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index f565f9944af2..7548db2bfb8a 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -21,7 +21,7 @@ struct mv_sata_platform_data;
#define ORION_MBUS_DEVBUS_BOOT_ATTR 0x0f
#define ORION_MBUS_DEVBUS_TARGET(cs) 0x01
#define ORION_MBUS_DEVBUS_ATTR(cs) (~(1 << cs))
-#define ORION_MBUS_SRAM_TARGET 0x00
+#define ORION_MBUS_SRAM_TARGET 0x09
#define ORION_MBUS_SRAM_ATTR 0x00
/*
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index e94f9458aa6f..993bce527b85 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -138,6 +138,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys(x))
/*
* virt_to_page(k) convert a _valid_ virtual address to struct page *
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 473e5dbf8f39..0f08dfd69ebc 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,11 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
ret = true;
- }
+ /*
+ * when using forced irq_set_affinity we must ensure that the cpu
+ * being offlined is not present in the affinity mask, it may be
+ * selected as the target CPU otherwise
+ */
+ affinity = cpu_online_mask;
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 5e9aec358306..31eb959e9aa8 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
int pud_huge(pud_t pud)
{
+#ifndef __PAGETABLE_PMD_FOLDED
return !(pud_val(pud) & PUD_TABLE_BIT);
+#else
+ return 0;
+#endif
}
int pmd_huge_support(void)
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index ae763d8bf55a..fb13dc5e8f8c 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 314 /* length of syscall table */
+#define NR_syscalls 315 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 715e85f858de..7de0a2d65da4 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -327,5 +327,6 @@
#define __NR_finit_module 1335
#define __NR_sched_setattr 1336
#define __NR_sched_getattr 1337
+#define __NR_renameat2 1338
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index fa8d61a312a7..ba3d03503e84 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1775,6 +1775,7 @@ sys_call_table:
data8 sys_finit_module // 1335
data8 sys_sched_setattr
data8 sys_sched_getattr
+ data8 sys_renameat2
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 9d38b73989eb..33afa56ad47a 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 351
+#define NR_syscalls 352
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index b932dd470041..9cd82fbc7817 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -356,5 +356,6 @@
#define __NR_finit_module 348
#define __NR_sched_setattr 349
#define __NR_sched_getattr 350
+#define __NR_renameat2 351
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index b6223dc41d82..501e10212789 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -371,4 +371,5 @@ ENTRY(sys_call_table)
.long sys_finit_module
.long sys_sched_setattr
.long sys_sched_getattr /* 350 */
+ .long sys_renameat2
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5d6b4b407dda..2d6f0de77325 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -15,6 +15,7 @@ static inline void wr_fence(void)
volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
barrier();
*flushptr = 0;
+ barrier();
}
#else /* CONFIG_METAG_META21 */
@@ -35,6 +36,7 @@ static inline void wr_fence(void)
*flushptr = 0;
*flushptr = 0;
*flushptr = 0;
+ barrier();
}
#endif /* !CONFIG_METAG_META21 */
@@ -68,6 +70,7 @@ static inline void fence(void)
volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
barrier();
*flushptr = 0;
+ barrier();
}
#define smp_mb() fence()
#define smp_rmb() fence()
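The metag change above adds a compiler barrier after the store that triggers the write fence. A stand-alone sketch of the idea using the usual GCC compiler-barrier idiom; the flushptr parameter is an assumption standing in for LINSYSEVENT_WR_FENCE.

#define barrier()	__asm__ __volatile__("" ::: "memory")

/* Keep the fence-triggering store ordered against surrounding accesses at
 * compile time: nothing before it may sink past it, nothing after it may
 * hoist above it. */
static inline void wr_fence_sketch(volatile int *flushptr)
{
	barrier();
	*flushptr = 0;
	barrier();
}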
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index f16477d1f571..a8a37477c66e 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -22,6 +22,8 @@
/* Add an extra page of padding at the top of the stack for the guard page. */
#define STACK_TOP (TASK_SIZE - PAGE_SIZE)
#define STACK_TOP_MAX STACK_TOP
+/* Maximum virtual space for stack */
+#define STACK_SIZE_MAX (CONFIG_MAX_STACK_SIZE_MB*1024*1024)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
diff --git a/arch/metag/include/uapi/asm/Kbuild b/arch/metag/include/uapi/asm/Kbuild
index 84e09feb4d54..ab78be2b6eb0 100644
--- a/arch/metag/include/uapi/asm/Kbuild
+++ b/arch/metag/include/uapi/asm/Kbuild
@@ -4,11 +4,11 @@ include include/uapi/asm-generic/Kbuild.asm
header-y += byteorder.h
header-y += ech.h
header-y += ptrace.h
-header-y += resource.h
header-y += sigcontext.h
header-y += siginfo.h
header-y += swab.h
header-y += unistd.h
generic-y += mman.h
+generic-y += resource.h
generic-y += setup.h
diff --git a/arch/metag/include/uapi/asm/resource.h b/arch/metag/include/uapi/asm/resource.h
deleted file mode 100644
index 526d23cc3054..000000000000
--- a/arch/metag/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _UAPI_METAG_RESOURCE_H
-#define _UAPI_METAG_RESOURCE_H
-
-#define _STK_LIM_MAX (1 << 28)
-#include <asm-generic/resource.h>
-
-#endif /* _UAPI_METAG_RESOURCE_H */
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
index 5abf4e894216..2a66e908f6a9 100644
--- a/arch/mips/dec/ecc-berr.c
+++ b/arch/mips/dec/ecc-berr.c
@@ -21,6 +21,7 @@
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/cpu-type.h>
#include <asm/irq_regs.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c
index f434b759e3b9..ec606363b806 100644
--- a/arch/mips/dec/kn02xa-berr.c
+++ b/arch/mips/dec/kn02xa-berr.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <asm/addrspace.h>
+#include <asm/cpu-type.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
diff --git a/arch/mips/dec/prom/Makefile b/arch/mips/dec/prom/Makefile
index 064ae7a76bdc..ae73e42ac20b 100644
--- a/arch/mips/dec/prom/Makefile
+++ b/arch/mips/dec/prom/Makefile
@@ -6,4 +6,3 @@
lib-y += init.o memory.o cmdline.o identify.o console.o
lib-$(CONFIG_32BIT) += locore.o
-lib-$(CONFIG_64BIT) += call_o32.o
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
deleted file mode 100644
index 8c8498159e43..000000000000
--- a/arch/mips/dec/prom/call_o32.S
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * O32 interface for the 64 (or N32) ABI.
- *
- * Copyright (C) 2002 Maciej W. Rozycki
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/* Maximum number of arguments supported. Must be even! */
-#define O32_ARGC 32
-/* Number of static registers we save. */
-#define O32_STATC 11
-/* Frame size for both of the above. */
-#define O32_FRAMESZ (4 * O32_ARGC + SZREG * O32_STATC)
-
- .text
-
-/*
- * O32 function call dispatcher, for interfacing 32-bit ROM routines.
- *
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1-a7 -- its first seven arguments
- * and the stack -- remaining ones (up to O32_ARGC, including a1-a7).
- * Static registers, gp and fp are preserved, v0 holds a result.
- * This code relies on the called o32 function for sp and ra
- * restoration and thus both this dispatcher and the current stack
- * have to be placed in a KSEGx (or KUSEG) address space. Any
- * pointers passed have to point to addresses within one of these
- * spaces as well.
- */
-NESTED(call_o32, O32_FRAMESZ, ra)
- REG_SUBU sp,O32_FRAMESZ
-
- REG_S ra,O32_FRAMESZ-1*SZREG(sp)
- REG_S fp,O32_FRAMESZ-2*SZREG(sp)
- REG_S gp,O32_FRAMESZ-3*SZREG(sp)
- REG_S s7,O32_FRAMESZ-4*SZREG(sp)
- REG_S s6,O32_FRAMESZ-5*SZREG(sp)
- REG_S s5,O32_FRAMESZ-6*SZREG(sp)
- REG_S s4,O32_FRAMESZ-7*SZREG(sp)
- REG_S s3,O32_FRAMESZ-8*SZREG(sp)
- REG_S s2,O32_FRAMESZ-9*SZREG(sp)
- REG_S s1,O32_FRAMESZ-10*SZREG(sp)
- REG_S s0,O32_FRAMESZ-11*SZREG(sp)
-
- move jp,a0
-
- sll a0,a1,zero
- sll a1,a2,zero
- sll a2,a3,zero
- sll a3,a4,zero
- sw a5,0x10(sp)
- sw a6,0x14(sp)
- sw a7,0x18(sp)
-
- PTR_LA t0,O32_FRAMESZ(sp)
- PTR_LA t1,0x1c(sp)
- li t2,O32_ARGC-7
-1:
- lw t3,(t0)
- REG_ADDU t0,SZREG
- sw t3,(t1)
- REG_SUBU t2,1
- REG_ADDU t1,4
- bnez t2,1b
-
- jalr jp
-
- REG_L s0,O32_FRAMESZ-11*SZREG(sp)
- REG_L s1,O32_FRAMESZ-10*SZREG(sp)
- REG_L s2,O32_FRAMESZ-9*SZREG(sp)
- REG_L s3,O32_FRAMESZ-8*SZREG(sp)
- REG_L s4,O32_FRAMESZ-7*SZREG(sp)
- REG_L s5,O32_FRAMESZ-6*SZREG(sp)
- REG_L s6,O32_FRAMESZ-5*SZREG(sp)
- REG_L s7,O32_FRAMESZ-4*SZREG(sp)
- REG_L gp,O32_FRAMESZ-3*SZREG(sp)
- REG_L fp,O32_FRAMESZ-2*SZREG(sp)
- REG_L ra,O32_FRAMESZ-1*SZREG(sp)
-
- REG_ADDU sp,O32_FRAMESZ
- jr ra
-END(call_o32)
diff --git a/arch/mips/fw/lib/call_o32.S b/arch/mips/fw/lib/call_o32.S
index b308b2a0613e..4703fe4dbd9a 100644
--- a/arch/mips/fw/lib/call_o32.S
+++ b/arch/mips/fw/lib/call_o32.S
@@ -1,7 +1,7 @@
/*
* O32 interface for the 64 (or N32) ABI.
*
- * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2002, 2014 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -12,28 +12,37 @@
#include <asm/asm.h>
#include <asm/regdef.h>
+/* O32 register size. */
+#define O32_SZREG 4
/* Maximum number of arguments supported. Must be even! */
#define O32_ARGC 32
-/* Number of static registers we save. */
+/* Number of static registers we save. */
#define O32_STATC 11
-/* Frame size for static register */
-#define O32_FRAMESZ (SZREG * O32_STATC)
-/* Frame size on new stack */
-#define O32_FRAMESZ_NEW (SZREG + 4 * O32_ARGC)
+/* Argument area frame size. */
+#define O32_ARGSZ (O32_SZREG * O32_ARGC)
+/* Static register save area frame size. */
+#define O32_STATSZ (SZREG * O32_STATC)
+/* Stack pointer register save area frame size. */
+#define O32_SPSZ SZREG
+/* Combined area frame size. */
+#define O32_FRAMESZ (O32_ARGSZ + O32_SPSZ + O32_STATSZ)
+/* Switched stack frame size. */
+#define O32_NFRAMESZ (O32_ARGSZ + O32_SPSZ)
.text
/*
* O32 function call dispatcher, for interfacing 32-bit ROM routines.
*
- * The standard 64 (N32) calling sequence is supported, with a0
- * holding a function pointer, a1 a new stack pointer, a2-a7 -- its
- * first six arguments and the stack -- remaining ones (up to O32_ARGC,
- * including a2-a7). Static registers, gp and fp are preserved, v0 holds
- * a result. This code relies on the called o32 function for sp and ra
- * restoration and this dispatcher has to be placed in a KSEGx (or KUSEG)
- * address space. Any pointers passed have to point to addresses within
- * one of these spaces as well.
+ * The standard 64 (N32) calling sequence is supported, with a0 holding
+ * a function pointer, a1 a pointer to the new stack to call the
+ * function with or 0 if no stack switching is requested, a2-a7 -- the
+ * function call's first six arguments, and the stack -- the remaining
+ * arguments (up to O32_ARGC, including a2-a7). Static registers, gp
+ * and fp are preserved, v0 holds the result. This code relies on the
+ * called o32 function for sp and ra restoration and this dispatcher has
+ * to be placed in a KSEGx (or KUSEG) address space. Any pointers
+ * passed have to point to addresses within one of these spaces as well.
*/
NESTED(call_o32, O32_FRAMESZ, ra)
REG_SUBU sp,O32_FRAMESZ
@@ -51,32 +60,36 @@ NESTED(call_o32, O32_FRAMESZ, ra)
REG_S s0,O32_FRAMESZ-11*SZREG(sp)
move jp,a0
- REG_SUBU s0,a1,O32_FRAMESZ_NEW
- REG_S sp,O32_FRAMESZ_NEW-1*SZREG(s0)
+
+ move fp,sp
+ beqz a1,0f
+ REG_SUBU fp,a1,O32_NFRAMESZ
+0:
+ REG_S sp,O32_NFRAMESZ-1*SZREG(fp)
sll a0,a2,zero
sll a1,a3,zero
sll a2,a4,zero
sll a3,a5,zero
- sw a6,0x10(s0)
- sw a7,0x14(s0)
+ sw a6,4*O32_SZREG(fp)
+ sw a7,5*O32_SZREG(fp)
PTR_LA t0,O32_FRAMESZ(sp)
- PTR_LA t1,0x18(s0)
+ PTR_LA t1,6*O32_SZREG(fp)
li t2,O32_ARGC-6
1:
lw t3,(t0)
REG_ADDU t0,SZREG
sw t3,(t1)
REG_SUBU t2,1
- REG_ADDU t1,4
+ REG_ADDU t1,O32_SZREG
bnez t2,1b
- move sp,s0
+ move sp,fp
jalr jp
- REG_L sp,O32_FRAMESZ_NEW-1*SZREG(sp)
+ REG_L sp,O32_NFRAMESZ-1*SZREG(sp)
REG_L s0,O32_FRAMESZ-11*SZREG(sp)
REG_L s1,O32_FRAMESZ-10*SZREG(sp)
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 2c2cb182af4e..6aa264b9856a 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -40,7 +40,8 @@
#ifdef CONFIG_64BIT
-static u8 o32_stk[16384];
+/* O32 stack has to be 8-byte aligned. */
+static u64 o32_stk[4096];
#define O32_STK &o32_stk[sizeof(o32_stk)]
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index c0ead6313845..b59a2103b61a 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -113,31 +113,31 @@ extern int (*__pmax_close)(int);
#define __DEC_PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
-int __DEC_PROM_O32(_rex_bootinit, (int (*)(void)));
-int __DEC_PROM_O32(_rex_bootread, (int (*)(void)));
-int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), memmap *));
+int __DEC_PROM_O32(_rex_bootinit, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_bootread, (int (*)(void), void *));
+int __DEC_PROM_O32(_rex_getbitmap, (int (*)(memmap *), void *, memmap *));
unsigned long *__DEC_PROM_O32(_rex_slot_address,
- (unsigned long *(*)(int), int));
-void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void)));
-int __DEC_PROM_O32(_rex_getsysid, (int (*)(void)));
-void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void)));
-
-int __DEC_PROM_O32(_prom_getchar, (int (*)(void)));
-char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), char *));
-int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), char *, ...));
-
-
-#define rex_bootinit() _rex_bootinit(__rex_bootinit)
-#define rex_bootread() _rex_bootread(__rex_bootread)
-#define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, x)
-#define rex_slot_address(x) _rex_slot_address(__rex_slot_address, x)
-#define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo)
-#define rex_getsysid() _rex_getsysid(__rex_getsysid)
-#define rex_clear_cache() _rex_clear_cache(__rex_clear_cache)
-
-#define prom_getchar() _prom_getchar(__prom_getchar)
-#define prom_getenv(x) _prom_getenv(__prom_getenv, x)
-#define prom_printf(x...) _prom_printf(__prom_printf, x)
+ (unsigned long *(*)(int), void *, int));
+void *__DEC_PROM_O32(_rex_gettcinfo, (void *(*)(void), void *));
+int __DEC_PROM_O32(_rex_getsysid, (int (*)(void), void *));
+void __DEC_PROM_O32(_rex_clear_cache, (void (*)(void), void *));
+
+int __DEC_PROM_O32(_prom_getchar, (int (*)(void), void *));
+char *__DEC_PROM_O32(_prom_getenv, (char *(*)(char *), void *, char *));
+int __DEC_PROM_O32(_prom_printf, (int (*)(char *, ...), void *, char *, ...));
+
+
+#define rex_bootinit() _rex_bootinit(__rex_bootinit, NULL)
+#define rex_bootread() _rex_bootread(__rex_bootread, NULL)
+#define rex_getbitmap(x) _rex_getbitmap(__rex_getbitmap, NULL, x)
+#define rex_slot_address(x) _rex_slot_address(__rex_slot_address, NULL, x)
+#define rex_gettcinfo() _rex_gettcinfo(__rex_gettcinfo, NULL)
+#define rex_getsysid() _rex_getsysid(__rex_getsysid, NULL)
+#define rex_clear_cache() _rex_clear_cache(__rex_clear_cache, NULL)
+
+#define prom_getchar() _prom_getchar(__prom_getchar, NULL)
+#define prom_getenv(x) _prom_getenv(__prom_getenv, NULL, x)
+#define prom_printf(x...) _prom_printf(__prom_printf, NULL, x)
#else /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/rm9k-ocd.h b/arch/mips/include/asm/rm9k-ocd.h
deleted file mode 100644
index b0b80d9ecf96..000000000000
--- a/arch/mips/include/asm/rm9k-ocd.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2004 by Basler Vision Technologies AG
- * Author: Thomas Koeller <thomas.koeller@baslerweb.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#if !defined(_ASM_RM9K_OCD_H)
-#define _ASM_RM9K_OCD_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <asm/io.h>
-
-extern volatile void __iomem * const ocd_base;
-extern volatile void __iomem * const titan_base;
-
-#define ocd_addr(__x__) (ocd_base + (__x__))
-#define titan_addr(__x__) (titan_base + (__x__))
-#define scram_addr(__x__) (scram_base + (__x__))
-
-/* OCD register access */
-#define ocd_readl(__offs__) __raw_readl(ocd_addr(__offs__))
-#define ocd_readw(__offs__) __raw_readw(ocd_addr(__offs__))
-#define ocd_readb(__offs__) __raw_readb(ocd_addr(__offs__))
-#define ocd_writel(__val__, __offs__) \
- __raw_writel((__val__), ocd_addr(__offs__))
-#define ocd_writew(__val__, __offs__) \
- __raw_writew((__val__), ocd_addr(__offs__))
-#define ocd_writeb(__val__, __offs__) \
- __raw_writeb((__val__), ocd_addr(__offs__))
-
-/* TITAN register access - 32 bit-wide only */
-#define titan_readl(__offs__) __raw_readl(titan_addr(__offs__))
-#define titan_writel(__val__, __offs__) \
- __raw_writel((__val__), titan_addr(__offs__))
-
-/* Protect access to shared TITAN registers */
-extern spinlock_t titan_lock;
-extern int titan_irqflags;
-#define lock_titan_regs() spin_lock_irqsave(&titan_lock, titan_irqflags)
-#define unlock_titan_regs() spin_unlock_irqrestore(&titan_lock, titan_irqflags)
-
-#endif /* !defined(_ASM_RM9K_OCD_H) */
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index c6e9cd2bca8d..17960fe7a8ce 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -133,6 +133,8 @@ static inline int syscall_get_arch(void)
#ifdef CONFIG_64BIT
if (!test_thread_flag(TIF_32BIT_REGS))
arch |= __AUDIT_ARCH_64BIT;
+ if (test_thread_flag(TIF_32BIT_ADDR))
+ arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;
#endif
#if defined(__LITTLE_ENDIAN)
arch |= __AUDIT_ARCH_LE;
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index df6e775f3fef..3125797f2a88 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -484,13 +484,13 @@ enum MIPS16e_i8_func {
* Damn ... bitfields depend from byteorder :-(
*/
#ifdef __MIPSEB__
-#define BITFIELD_FIELD(field, more) \
+#define __BITFIELD_FIELD(field, more) \
field; \
more
#elif defined(__MIPSEL__)
-#define BITFIELD_FIELD(field, more) \
+#define __BITFIELD_FIELD(field, more) \
more \
field;
@@ -499,112 +499,112 @@ enum MIPS16e_i8_func {
#endif
struct j_format {
- BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
- BITFIELD_FIELD(unsigned int target : 26,
+ __BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+ __BITFIELD_FIELD(unsigned int target : 26,
;))
};
struct i_format { /* signed immediate format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(signed int simmediate : 16,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(signed int simmediate : 16,
;))))
};
struct u_format { /* unsigned immediate format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int uimmediate : 16,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int uimmediate : 16,
;))))
};
struct c_format { /* Cache (>= R6000) format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(unsigned int c_op : 3,
- BITFIELD_FIELD(unsigned int cache : 2,
- BITFIELD_FIELD(unsigned int simmediate : 16,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int c_op : 3,
+ __BITFIELD_FIELD(unsigned int cache : 2,
+ __BITFIELD_FIELD(unsigned int simmediate : 16,
;)))))
};
struct r_format { /* Register format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int rd : 5,
- BITFIELD_FIELD(unsigned int re : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct p_format { /* Performance counter format (R10000) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int rd : 5,
- BITFIELD_FIELD(unsigned int re : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct f_format { /* FPU register format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int : 1,
- BITFIELD_FIELD(unsigned int fmt : 4,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int rd : 5,
- BITFIELD_FIELD(unsigned int re : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int : 1,
+ __BITFIELD_FIELD(unsigned int fmt : 4,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;)))))))
};
struct ma_format { /* FPU multiply and add format (MIPS IV) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int fr : 5,
- BITFIELD_FIELD(unsigned int ft : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int func : 4,
- BITFIELD_FIELD(unsigned int fmt : 2,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(unsigned int fmt : 2,
;)))))))
};
struct b_format { /* BREAK and SYSCALL */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int code : 20,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int code : 20,
+ __BITFIELD_FIELD(unsigned int func : 6,
;)))
};
struct ps_format { /* MIPS-3D / paired single format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(unsigned int ft : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct v_format { /* MDMX vector format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int sel : 4,
- BITFIELD_FIELD(unsigned int fmt : 1,
- BITFIELD_FIELD(unsigned int vt : 5,
- BITFIELD_FIELD(unsigned int vs : 5,
- BITFIELD_FIELD(unsigned int vd : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int sel : 4,
+ __BITFIELD_FIELD(unsigned int fmt : 1,
+ __BITFIELD_FIELD(unsigned int vt : 5,
+ __BITFIELD_FIELD(unsigned int vs : 5,
+ __BITFIELD_FIELD(unsigned int vd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;)))))))
};
struct spec3_format { /* SPEC3 */
- BITFIELD_FIELD(unsigned int opcode:6,
- BITFIELD_FIELD(unsigned int rs:5,
- BITFIELD_FIELD(unsigned int rt:5,
- BITFIELD_FIELD(signed int simmediate:9,
- BITFIELD_FIELD(unsigned int func:7,
+ __BITFIELD_FIELD(unsigned int opcode:6,
+ __BITFIELD_FIELD(unsigned int rs:5,
+ __BITFIELD_FIELD(unsigned int rt:5,
+ __BITFIELD_FIELD(signed int simmediate:9,
+ __BITFIELD_FIELD(unsigned int func:7,
;)))))
};
@@ -616,141 +616,141 @@ struct spec3_format { /* SPEC3 */
* if it is MIPS32 instruction re-encoded for use in the microMIPS ASE.
*/
struct fb_format { /* FPU branch format (MIPS32) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int bc : 5,
- BITFIELD_FIELD(unsigned int cc : 3,
- BITFIELD_FIELD(unsigned int flag : 2,
- BITFIELD_FIELD(signed int simmediate : 16,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int bc : 5,
+ __BITFIELD_FIELD(unsigned int cc : 3,
+ __BITFIELD_FIELD(unsigned int flag : 2,
+ __BITFIELD_FIELD(signed int simmediate : 16,
;)))))
};
struct fp0_format { /* FPU multiply and add format (MIPS32) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int fmt : 5,
- BITFIELD_FIELD(unsigned int ft : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fmt : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct mm_fp0_format { /* FPU multipy and add format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int ft : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int fmt : 3,
- BITFIELD_FIELD(unsigned int op : 2,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int fmt : 3,
+ __BITFIELD_FIELD(unsigned int op : 2,
+ __BITFIELD_FIELD(unsigned int func : 6,
;)))))))
};
struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int op : 5,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int op : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fmt : 2,
- BITFIELD_FIELD(unsigned int op : 8,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fmt : 2,
+ __BITFIELD_FIELD(unsigned int op : 8,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int cc : 3,
- BITFIELD_FIELD(unsigned int zero : 2,
- BITFIELD_FIELD(unsigned int fmt : 2,
- BITFIELD_FIELD(unsigned int op : 3,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int cc : 3,
+ __BITFIELD_FIELD(unsigned int zero : 2,
+ __BITFIELD_FIELD(unsigned int fmt : 2,
+ __BITFIELD_FIELD(unsigned int op : 3,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))))
};
struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fmt : 3,
- BITFIELD_FIELD(unsigned int op : 7,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fmt : 3,
+ __BITFIELD_FIELD(unsigned int op : 7,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct mm_fp4_format { /* FPU c.cond format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int cc : 3,
- BITFIELD_FIELD(unsigned int fmt : 3,
- BITFIELD_FIELD(unsigned int cond : 4,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int cc : 3,
+ __BITFIELD_FIELD(unsigned int fmt : 3,
+ __BITFIELD_FIELD(unsigned int cond : 4,
+ __BITFIELD_FIELD(unsigned int func : 6,
;)))))))
};
struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int index : 5,
- BITFIELD_FIELD(unsigned int base : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int op : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int op : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct fp6_format { /* FPU madd and msub format (MIPS IV) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int fr : 5,
- BITFIELD_FIELD(unsigned int ft : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int ft : 5,
- BITFIELD_FIELD(unsigned int fs : 5,
- BITFIELD_FIELD(unsigned int fd : 5,
- BITFIELD_FIELD(unsigned int fr : 5,
- BITFIELD_FIELD(unsigned int func : 6,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int ft : 5,
+ __BITFIELD_FIELD(unsigned int fs : 5,
+ __BITFIELD_FIELD(unsigned int fd : 5,
+ __BITFIELD_FIELD(unsigned int fr : 5,
+ __BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct mm_i_format { /* Immediate format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(unsigned int rs : 5,
- BITFIELD_FIELD(signed int simmediate : 16,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(signed int simmediate : 16,
;))))
};
struct mm_m_format { /* Multi-word load/store format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rd : 5,
- BITFIELD_FIELD(unsigned int base : 5,
- BITFIELD_FIELD(unsigned int func : 4,
- BITFIELD_FIELD(signed int simmediate : 12,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(signed int simmediate : 12,
;)))))
};
struct mm_x_format { /* Scaled indexed load format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int index : 5,
- BITFIELD_FIELD(unsigned int base : 5,
- BITFIELD_FIELD(unsigned int rd : 5,
- BITFIELD_FIELD(unsigned int func : 11,
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int index : 5,
+ __BITFIELD_FIELD(unsigned int base : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int func : 11,
;)))))
};
@@ -758,51 +758,51 @@ struct mm_x_format { /* Scaled indexed load format (microMIPS) */
* microMIPS instruction formats (16-bit length)
*/
struct mm_b0_format { /* Unconditional branch format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(signed int simmediate : 10,
- BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(signed int simmediate : 10,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;)))
};
struct mm_b1_format { /* Conditional branch format (microMIPS) */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rs : 3,
- BITFIELD_FIELD(signed int simmediate : 7,
- BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 3,
+ __BITFIELD_FIELD(signed int simmediate : 7,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;))))
};
struct mm16_m_format { /* Multi-word load/store format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int func : 4,
- BITFIELD_FIELD(unsigned int rlist : 2,
- BITFIELD_FIELD(unsigned int imm : 4,
- BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int func : 4,
+ __BITFIELD_FIELD(unsigned int rlist : 2,
+ __BITFIELD_FIELD(unsigned int imm : 4,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;)))))
};
struct mm16_rb_format { /* Signed immediate format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 3,
- BITFIELD_FIELD(unsigned int base : 3,
- BITFIELD_FIELD(signed int simmediate : 4,
- BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 3,
+ __BITFIELD_FIELD(unsigned int base : 3,
+ __BITFIELD_FIELD(signed int simmediate : 4,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;)))))
};
struct mm16_r3_format { /* Load from global pointer format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 3,
- BITFIELD_FIELD(signed int simmediate : 7,
- BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 3,
+ __BITFIELD_FIELD(signed int simmediate : 7,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;))))
};
struct mm16_r5_format { /* Load/store from stack pointer format */
- BITFIELD_FIELD(unsigned int opcode : 6,
- BITFIELD_FIELD(unsigned int rt : 5,
- BITFIELD_FIELD(signed int simmediate : 5,
- BITFIELD_FIELD(unsigned int : 16, /* Ignored */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(signed int simmediate : 5,
+ __BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;))))
};
@@ -810,57 +810,57 @@ struct mm16_r5_format { /* Load/store from stack pointer format */
* MIPS16e instruction formats (16-bit length)
*/
struct m16e_rr {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int rx : 3,
- BITFIELD_FIELD(unsigned int nd : 1,
- BITFIELD_FIELD(unsigned int l : 1,
- BITFIELD_FIELD(unsigned int ra : 1,
- BITFIELD_FIELD(unsigned int func : 5,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int rx : 3,
+ __BITFIELD_FIELD(unsigned int nd : 1,
+ __BITFIELD_FIELD(unsigned int l : 1,
+ __BITFIELD_FIELD(unsigned int ra : 1,
+ __BITFIELD_FIELD(unsigned int func : 5,
;))))))
};
struct m16e_jal {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int x : 1,
- BITFIELD_FIELD(unsigned int imm20_16 : 5,
- BITFIELD_FIELD(signed int imm25_21 : 5,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int x : 1,
+ __BITFIELD_FIELD(unsigned int imm20_16 : 5,
+ __BITFIELD_FIELD(signed int imm25_21 : 5,
;))))
};
struct m16e_i64 {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int func : 3,
- BITFIELD_FIELD(unsigned int imm : 8,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int func : 3,
+ __BITFIELD_FIELD(unsigned int imm : 8,
;)))
};
struct m16e_ri64 {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int func : 3,
- BITFIELD_FIELD(unsigned int ry : 3,
- BITFIELD_FIELD(unsigned int imm : 5,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int func : 3,
+ __BITFIELD_FIELD(unsigned int ry : 3,
+ __BITFIELD_FIELD(unsigned int imm : 5,
;))))
};
struct m16e_ri {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int rx : 3,
- BITFIELD_FIELD(unsigned int imm : 8,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int rx : 3,
+ __BITFIELD_FIELD(unsigned int imm : 8,
;)))
};
struct m16e_rri {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int rx : 3,
- BITFIELD_FIELD(unsigned int ry : 3,
- BITFIELD_FIELD(unsigned int imm : 5,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int rx : 3,
+ __BITFIELD_FIELD(unsigned int ry : 3,
+ __BITFIELD_FIELD(unsigned int imm : 5,
;))))
};
struct m16e_i8 {
- BITFIELD_FIELD(unsigned int opcode : 5,
- BITFIELD_FIELD(unsigned int func : 3,
- BITFIELD_FIELD(unsigned int imm : 8,
+ __BITFIELD_FIELD(unsigned int opcode : 5,
+ __BITFIELD_FIELD(unsigned int func : 3,
+ __BITFIELD_FIELD(unsigned int imm : 8,
;)))
};
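To illustrate what the renamed __BITFIELD_FIELD() helper does: on big-endian targets the first-listed field takes the most significant bits, so fields are emitted in declaration order, while on little-endian the nesting reverses them. The struct below is a hypothetical demo, not one of the kernel instruction formats.

#include <stdint.h>

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define __BITFIELD_FIELD(field, more)	field; more
#else
#define __BITFIELD_FIELD(field, more)	more field;
#endif

/* Expands to "opcode; target; ;" on big-endian and "; target; opcode;" on
 * little-endian, so the encoded instruction layout is the same either way. */
struct demo_j_format {
	__BITFIELD_FIELD(uint32_t opcode : 6,
	__BITFIELD_FIELD(uint32_t target : 26,
	;))
};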
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index d6e154a9e6a5..2692abb28e36 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -371,11 +371,12 @@
#define __NR_finit_module (__NR_Linux + 348)
#define __NR_sched_setattr (__NR_Linux + 349)
#define __NR_sched_getattr (__NR_Linux + 350)
+#define __NR_renameat2 (__NR_Linux + 351)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 350
+#define __NR_Linux_syscalls 351
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -699,11 +700,12 @@
#define __NR_getdents64 (__NR_Linux + 308)
#define __NR_sched_setattr (__NR_Linux + 309)
#define __NR_sched_getattr (__NR_Linux + 310)
+#define __NR_renameat2 (__NR_Linux + 311)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 310
+#define __NR_Linux_syscalls 311
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
@@ -1031,11 +1033,12 @@
#define __NR_finit_module (__NR_Linux + 312)
#define __NR_sched_setattr (__NR_Linux + 313)
#define __NR_sched_getattr (__NR_Linux + 314)
+#define __NR_renameat2 (__NR_Linux + 315)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 314
+#define __NR_Linux_syscalls 315
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index e40971b51d2f..037a44d962f3 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -124,14 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "kscratch registers\t: %d\n",
hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
-#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
- if (cpu_has_mipsmt) {
- seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-#if defined(CONFIG_MIPS_MT_SMTC)
- seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
-#endif
- }
-#endif
+
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fdc70b400442..3245474f19d5 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -577,3 +577,4 @@ EXPORT(sys_call_table)
PTR sys_finit_module
PTR sys_sched_setattr
PTR sys_sched_getattr /* 4350 */
+ PTR sys_renameat2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index dd99c3285aea..be2fedd4ae33 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -430,4 +430,5 @@ EXPORT(sys_call_table)
PTR sys_getdents64
PTR sys_sched_setattr
PTR sys_sched_getattr /* 5310 */
+ PTR sys_renameat2
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f68d2f4f0090..c1dbcda4b816 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -423,4 +423,5 @@ EXPORT(sysn32_call_table)
PTR sys_finit_module
PTR sys_sched_setattr
PTR sys_sched_getattr
+ PTR sys_renameat2 /* 6315 */
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 70f6acecd928..f1343ccd7ed7 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -556,4 +556,5 @@ EXPORT(sys32_call_table)
PTR sys_finit_module
PTR sys_sched_setattr
PTR sys_sched_getattr /* 4350 */
+ PTR sys_renameat2
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
index fac1f5b178eb..143b8a37b5e4 100644
--- a/arch/mips/lantiq/dts/easy50712.dts
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -8,6 +8,7 @@
};
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 2e4825e48388..9901237563c5 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -56,14 +56,20 @@
#define UNIT(unit) ((unit)*NBYTES)
#define ADDC(sum,reg) \
+ .set push; \
+ .set noat; \
ADD sum, reg; \
sltu v1, sum, reg; \
ADD sum, v1; \
+ .set pop
#define ADDC32(sum,reg) \
+ .set push; \
+ .set noat; \
addu sum, reg; \
sltu v1, sum, reg; \
addu sum, v1; \
+ .set pop
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
LOAD _t0, (offset + UNIT(0))(src); \
@@ -710,6 +716,8 @@ LEAF(csum_partial)
ADDC(sum, t2)
.Ldone\@:
/* fold checksum */
+ .set push
+ .set noat
#ifdef USE_DOUBLE
dsll32 v1, sum, 0
daddu sum, v1
@@ -732,6 +740,7 @@ LEAF(csum_partial)
or sum, sum, t0
1:
#endif
+ .set pop
.set reorder
ADDC32(sum, psum)
jr ra
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 44713af15a62..705cfb7c1a74 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -6,7 +6,7 @@
* Copyright (C) 1994 by Waldorf Electronics
* Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2007 Maciej W. Rozycki
+ * Copyright (C) 2007, 2014 Maciej W. Rozycki
*/
#include <linux/module.h>
#include <linux/param.h>
@@ -15,6 +15,12 @@
#include <asm/compiler.h>
#include <asm/war.h>
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+#define GCC_DADDI_IMM_ASM() "I"
+#else
+#define GCC_DADDI_IMM_ASM() "r"
+#endif
+
void __delay(unsigned long loops)
{
__asm__ __volatile__ (
@@ -22,13 +28,13 @@ void __delay(unsigned long loops)
" .align 3 \n"
"1: bnez %0, 1b \n"
#if BITS_PER_LONG == 32
- " subu %0, 1 \n"
+ " subu %0, %1 \n"
#else
- " dsubu %0, 1 \n"
+ " dsubu %0, %1 \n"
#endif
" .set reorder \n"
: "=r" (loops)
- : "0" (loops));
+ : GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index d3301cd1e9a5..3c32baf8b494 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm)
bnez v0, .Lfault\@
FEXPORT(__strncpy_from_\func\()_nocheck_asm)
- .set noreorder
move t0, zero
move v1, a1
.ifeqs "\func","kernel"
@@ -45,21 +44,21 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm)
.endif
PTR_ADDIU v1, 1
R10KCBARRIER(0(ra))
+ sb v0, (a0)
beqz v0, 2f
- sb v0, (a0)
PTR_ADDIU t0, 1
+ PTR_ADDIU a0, 1
bne t0, a2, 1b
- PTR_ADDIU a0, 1
2: PTR_ADDU v0, a1, t0
xor v0, a1
bltz v0, .Lfault\@
- nop
+ move v0, t0
jr ra # return n
- move v0, t0
END(__strncpy_from_\func\()_asm)
-.Lfault\@: jr ra
- li v0, -EFAULT
+.Lfault\@:
+ li v0, -EFAULT
+ jr ra
.section __ex_table,"a"
PTR 1b, .Lfault\@
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 7397be226a06..603d79a95f47 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -64,7 +64,6 @@ config LEMOTE_MACH3A
bool "Lemote Loongson 3A family machines"
select ARCH_SPARSEMEM_ENABLE
select GENERIC_ISA_DMA_SUPPORT_BROKEN
- select GENERIC_HARDIRQS_NO__DO_IRQ
select BOOT_ELF32
select BOARD_SCACHE
select CSRC_R4K
diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
index e1f427f4f5f3..67dd94ef28e6 100644
--- a/arch/mips/loongson/lemote-2f/clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
+ unsigned int rate_khz = rate / 1000;
int ret = 0;
int regval;
int i;
@@ -111,10 +112,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (loongson2_clockmod_table[i].frequency ==
CPUFREQ_ENTRY_INVALID)
continue;
- if (rate == loongson2_clockmod_table[i].frequency)
+ if (rate_khz == loongson2_clockmod_table[i].frequency)
break;
}
- if (rate != loongson2_clockmod_table[i].frequency)
+ if (rate_khz != loongson2_clockmod_table[i].frequency)
return -ENOTSUPP;
clk->rate = rate;
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
index 30a494db99c2..a5427c6e9757 100644
--- a/arch/mips/mm/tlb-funcs.S
+++ b/arch/mips/mm/tlb-funcs.S
@@ -16,8 +16,10 @@
#define FASTPATH_SIZE 128
+EXPORT(tlbmiss_handler_setup_pgd_start)
LEAF(tlbmiss_handler_setup_pgd)
- .space 16 * 4
+1: j 1b /* Dummy, will be replaced. */
+ .space 64
END(tlbmiss_handler_setup_pgd)
EXPORT(tlbmiss_handler_setup_pgd_end)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index ee88367ab3ad..f99ec587b151 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1422,16 +1422,17 @@ static void build_r4000_tlb_refill_handler(void)
extern u32 handle_tlbl[], handle_tlbl_end[];
extern u32 handle_tlbs[], handle_tlbs_end[];
extern u32 handle_tlbm[], handle_tlbm_end[];
-extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
+extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
+extern u32 tlbmiss_handler_setup_pgd_end[];
static void build_setup_pgd(void)
{
const int a0 = 4;
const int __maybe_unused a1 = 5;
const int __maybe_unused a2 = 6;
- u32 *p = tlbmiss_handler_setup_pgd;
+ u32 *p = tlbmiss_handler_setup_pgd_start;
const int tlbmiss_handler_setup_pgd_size =
- tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
+ tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
long pgdc = (long)pgd_current;
#endif
diff --git a/arch/mips/ralink/dts/mt7620a_eval.dts b/arch/mips/ralink/dts/mt7620a_eval.dts
index 35eb874ab7f1..709f58132f5c 100644
--- a/arch/mips/ralink/dts/mt7620a_eval.dts
+++ b/arch/mips/ralink/dts/mt7620a_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink MT7620A evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/mips/ralink/dts/rt2880_eval.dts b/arch/mips/ralink/dts/rt2880_eval.dts
index 322d7002595b..0a685db093d4 100644
--- a/arch/mips/ralink/dts/rt2880_eval.dts
+++ b/arch/mips/ralink/dts/rt2880_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink RT2880 evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x8000000 0x2000000>;
};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index 0ac73ea28198..ec9e9a035541 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink RT3052 evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/mips/ralink/dts/rt3883_eval.dts b/arch/mips/ralink/dts/rt3883_eval.dts
index 2fa6b330bf4f..e8df21a5d10d 100644
--- a/arch/mips/ralink/dts/rt3883_eval.dts
+++ b/arch/mips/ralink/dts/rt3883_eval.dts
@@ -7,6 +7,7 @@
model = "Ralink RT3883 evaluation board";
memory@0 {
+ device_type = "memory";
reg = <0x0 0x2000000>;
};
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 1faefed32749..108d48e652af 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -22,6 +22,7 @@ config PARISC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
+ select SYSCTL_EXCEPTION_TRACE
select HAVE_MOD_ARCH_SPECIFIC
select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 198a86feb574..d951c9681ab3 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -55,6 +55,11 @@
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX DEFAULT_TASK_SIZE
+/* Allow bigger stacks for 64-bit processes */
+#define STACK_SIZE_MAX (USER_WIDE_MODE \
+ ? (1 << 30) /* 1 GB */ \
+ : (CONFIG_MAX_STACK_SIZE_MB*1024*1024))
+
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 265ae5190b0a..47e0e21d2272 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -829,8 +829,9 @@
#define __NR_sched_setattr (__NR_Linux + 334)
#define __NR_sched_getattr (__NR_Linux + 335)
#define __NR_utimes (__NR_Linux + 336)
+#define __NR_renameat2 (__NR_Linux + 337)
-#define __NR_Linux_syscalls (__NR_utimes + 1)
+#define __NR_Linux_syscalls (__NR_renameat2 + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 31ffa9b55322..e1ffea2f9a0b 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -72,10 +72,10 @@ static unsigned long mmap_upper_limit(void)
{
unsigned long stack_base;
- /* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+ /* Limit stack size - see setup_arg_pages() in fs/exec.c */
stack_base = rlimit_max(RLIMIT_STACK);
- if (stack_base > (1 << 30))
- stack_base = 1 << 30;
+ if (stack_base > STACK_SIZE_MAX)
+ stack_base = STACK_SIZE_MAX;
return PAGE_ALIGN(STACK_TOP - stack_base);
}
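A userspace-flavoured sketch of the clamp now used by mmap_upper_limit(): take the hard stack rlimit, bound it by the configured maximum, and subtract it from the stack top. STACK_SIZE_MAX, PAGE_ALIGN and the stack_top parameter are stand-in assumptions for the kernel definitions.

#include <sys/resource.h>

#define STACK_SIZE_MAX	(1UL << 30)			/* assumed 1 GiB cap */
#define PAGE_ALIGN(x)	(((x) + 4095UL) & ~4095UL)	/* assumed 4 KiB pages */

static unsigned long mmap_upper_limit(unsigned long stack_top)
{
	struct rlimit rl;
	unsigned long stack_base;

	if (getrlimit(RLIMIT_STACK, &rl) != 0)
		rl.rlim_max = STACK_SIZE_MAX;
	stack_base = rl.rlim_max;
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;
	return PAGE_ALIGN(stack_top - stack_base);
}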
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index a63bb179f79a..838786011037 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -589,10 +589,13 @@ cas_nocontend:
# endif
/* ENABLE_LWS_DEBUG */
+ rsm PSW_SM_I, %r0 /* Disable interrupts */
+ /* COW breaks can cause contention on UP systems */
LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */
cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
cas_wouldblock:
ldo 2(%r0), %r28 /* 2nd case */
+ ssm PSW_SM_I, %r0
b lws_exit /* Contended... */
ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
@@ -619,15 +622,17 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw 0(%sr3,%r26), %r28
+1: ldw,ma 0(%sr3,%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw %r24, 0(%sr3,%r26)
+2: stw,ma %r24, 0(%sr3,%r26)
/* Free lock */
- stw %r20, 0(%sr2,%r20)
+ stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
#endif
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
b lws_exit
copy %r0, %r21
@@ -639,6 +644,7 @@ cas_action:
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
#endif
+ ssm PSW_SM_I, %r0
b lws_exit
ldo -EFAULT(%r0),%r21 /* set errno */
nop
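The syscall.S hunk above masks interrupts around the light-weight-syscall CAS because the operation is emulated under a lock rather than performed with a native atomic. A rough pthread analogue of that emulation; the function name and the mutex are illustrative only, and the real code spins on LDCW with interrupts masked instead of sleeping on a lock.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cas_lock = PTHREAD_MUTEX_INITIALIZER;

/* Compare-and-swap emulated under a lock: the comparison and the store
 * must be indivisible, which is what the LDCW spinlock (plus the added
 * rsm/ssm interrupt masking) provides in the kernel path. */
static bool cas_emulated(int *addr, int old_val, int new_val)
{
	bool ok;

	pthread_mutex_lock(&cas_lock);
	ok = (*addr == old_val);
	if (ok)
		*addr = new_val;
	pthread_mutex_unlock(&cas_lock);
	return ok;
}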
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 83ead0ea127d..c5fa7a697fba 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -432,6 +432,7 @@
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
ENTRY_COMP(utimes)
+ ENTRY_SAME(renameat2)
/* Nothing yet */
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 1cd1d0c83b6d..47ee620d15d2 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
+#include <linux/ratelimit.h>
#include <asm/assembly.h>
#include <asm/uaccess.h>
@@ -42,9 +43,6 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
- /* dumped to the console via printk) */
-
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif
@@ -160,6 +158,17 @@ void show_regs(struct pt_regs *regs)
}
}
+static DEFINE_RATELIMIT_STATE(_hppa_rs,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+#define parisc_printk_ratelimited(critical, regs, fmt, ...) { \
+ if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
+ printk(fmt, ##__VA_ARGS__); \
+ show_regs(regs); \
+ } \
+}
+
+
static void do_show_stack(struct unwind_frame_info *info)
{
int i = 1;
@@ -229,12 +238,10 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
if (err == 0)
return; /* STFU */
- printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+ parisc_printk_ratelimited(1, regs,
+ KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
-#ifdef PRINT_USER_FAULTS
- /* XXX for debugging only */
- show_regs(regs);
-#endif
+
return;
}
@@ -321,14 +328,11 @@ static void handle_break(struct pt_regs *regs)
(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
}
-#ifdef PRINT_USER_FAULTS
- if (unlikely(iir != GDB_BREAK_INSN)) {
- printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
+ if (unlikely(iir != GDB_BREAK_INSN))
+ parisc_printk_ratelimited(0, regs,
+ KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
iir & 31, (iir>>13) & ((1<<13)-1),
task_pid_nr(current), current->comm);
- show_regs(regs);
- }
-#endif
/* send standard GDB signal */
handle_gdb_break(regs, TRAP_BRKPT);
@@ -758,11 +762,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
default:
if (user_mode(regs)) {
-#ifdef PRINT_USER_FAULTS
- printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
- task_pid_nr(current), current->comm);
- show_regs(regs);
-#endif
+ parisc_printk_ratelimited(0, regs, KERN_DEBUG
+ "handle_interruption() pid=%d command='%s'\n",
+ task_pid_nr(current), current->comm);
/* SIGBUS, for lack of a better one. */
si.si_signo = SIGBUS;
si.si_code = BUS_OBJERR;
@@ -779,16 +781,10 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (user_mode(regs)) {
if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
-#ifdef PRINT_USER_FAULTS
- if (fault_space == 0)
- printk(KERN_DEBUG "User Fault on Kernel Space ");
- else
- printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
- code);
- printk(KERN_CONT "pid=%d command='%s'\n",
- task_pid_nr(current), current->comm);
- show_regs(regs);
-#endif
+ parisc_printk_ratelimited(0, regs, KERN_DEBUG
+ "User fault %d on space 0x%08lx, pid=%d command='%s'\n",
+ code, fault_space,
+ task_pid_nr(current), current->comm);
si.si_signo = SIGSEGV;
si.si_errno = 0;
si.si_code = SEGV_MAPERR;
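The traps.c rework above replaces the compile-time PRINT_USER_FAULTS switch with rate-limited, sysctl-gated reporting. A small userspace analogue of the rate limiting; the struct and function names are made up for illustration, while the kernel uses DEFINE_RATELIMIT_STATE() and __ratelimit().

#include <stdbool.h>
#include <time.h>

struct ratelimit {
	time_t	window_start;	/* start of the current interval */
	int	interval;	/* window length in seconds */
	int	burst;		/* messages allowed per window */
	int	printed;	/* messages emitted in this window */
};

/* Return true when another message may be printed in the current window. */
static bool ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (now - rs->window_start >= rs->interval) {
		rs->window_start = now;
		rs->printed = 0;
	}
	return rs->printed++ < rs->burst;
}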
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 747550762f3c..3ca9c1131cfe 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -19,10 +19,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
- /* dumped to the console via printk) */
-
-
/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)
@@ -34,6 +30,8 @@
DEFINE_PER_CPU(struct exception_data, exception_data);
+int show_unhandled_signals = 1;
+
/*
* parisc_acctyp(unsigned int inst) --
* Given a PA-RISC memory access instruction, determine if the
@@ -173,6 +171,32 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long code,
+ unsigned long address, struct task_struct *tsk,
+ struct vm_area_struct *vma)
+{
+ if (!unhandled_signal(tsk, SIGSEGV))
+ return;
+
+ if (!printk_ratelimit())
+ return;
+
+ pr_warn("\n");
+ pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
+ tsk->comm, code, address);
+ print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+ if (vma)
+ pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ vma->vm_start, vma->vm_end);
+
+ show_regs(regs);
+}
+
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address)
{
@@ -270,16 +294,8 @@ bad_area:
if (user_mode(regs)) {
struct siginfo si;
-#ifdef PRINT_USER_FAULTS
- printk(KERN_DEBUG "\n");
- printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
- task_pid_nr(tsk), tsk->comm, code, address);
- if (vma) {
- printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
- vma->vm_start, vma->vm_end);
- }
- show_regs(regs);
-#endif
+ show_signal_msg(regs, code, address, tsk, vma);
+
switch (code) {
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 122a580f7322..7e711bdcc6da 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -813,9 +813,6 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
- /* Don't adjust the decrementer if some irq work is pending */
- if (test_irq_work_pending())
- return 0;
__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
set_dec(evt);
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 253fefe3d1a0..5b51079f3e3b 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -549,7 +549,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
ret = ioda_eeh_phb_reset(hose, option);
} else {
bus = eeh_pe_bus_get(pe);
- if (pci_is_root_bus(bus))
+ if (pci_is_root_bus(bus) ||
+ pci_is_root_bus(bus->parent))
ret = ioda_eeh_root_reset(hose, option);
else
ret = ioda_eeh_bridge_reset(hose, bus->self, option);
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index cf3c0089bef2..23223cd63e54 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -820,6 +820,9 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
else
memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
}
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 0a5aac8a9412..7acb77f7ef1a 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -429,6 +429,9 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
else
memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
}
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 452d3ebd9d0f..e9f8fa9337fe 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -811,7 +811,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
return NULL;
memset(header, 0, sz);
header->pages = sz / PAGE_SIZE;
- hole = sz - (bpfsize + sizeof(*header));
+ hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header));
/* Insert random number of illegal instructions before BPF code
* and make sure the first instruction starts at an even address.
*/
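
The min() clamp above keeps the randomly chosen pad before the JITed image within the first page of the allocation. A small userspace sketch of the arithmetic, with an illustrative header size and the 128-byte headroom assumed from the surrounding JIT code:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   4096UL
#define HEADER_SIZE 16UL   /* stand-in for sizeof(*header) */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* A program large enough that the allocation spans two pages. */
	unsigned long bpfsize = 4000;
	unsigned long sz = ((bpfsize + HEADER_SIZE + 128 + PAGE_SIZE - 1)
			    / PAGE_SIZE) * PAGE_SIZE;                 /* 8192 */

	unsigned long unclamped = sz - (bpfsize + HEADER_SIZE);           /* 4176 */
	unsigned long hole = min_ul(unclamped, PAGE_SIZE - HEADER_SIZE);  /* 4080 */

	/* The random pad now keeps the first instruction inside the page
	 * that also holds the header. */
	unsigned long start = (unsigned long)rand() % hole;

	printf("unclamped=%lu clamped=%lu start=%lu\n", unclamped, hole, start);
	return 0;
}
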
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 0f9e94537eee..1a49ffdf9da9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -24,7 +24,8 @@
/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
* The page copy blockops can use 0x6000000 to 0x8000000.
- * The TSB is mapped in the 0x8000000 to 0xa000000 range.
+ * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
+ * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
* The PROM resides in an area spanning 0xf0000000 to 0x100000000.
* The vmalloc area spans 0x100000000 to 0x200000000.
* Since modules need to be in the lowest 32-bits of the address space,
@@ -33,7 +34,8 @@
* 0x400000000.
*/
#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
-#define TSBMAP_BASE _AC(0x0000000008000000,UL)
+#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL)
+#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL)
#define MODULES_VADDR _AC(0x0000000010000000,UL)
#define MODULES_LEN _AC(0x00000000e0000000,UL)
#define MODULES_END _AC(0x00000000f0000000,UL)
@@ -71,6 +73,23 @@
#include <linux/sched.h>
+extern unsigned long sparc64_valid_addr_bitmap[];
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+static inline bool __kern_addr_valid(unsigned long paddr)
+{
+ if ((paddr >> MAX_PHYS_ADDRESS_BITS) != 0UL)
+ return false;
+ return test_bit(paddr >> ILOG2_4MB, sparc64_valid_addr_bitmap);
+}
+
+static inline bool kern_addr_valid(unsigned long addr)
+{
+ unsigned long paddr = __pa(addr);
+
+ return __kern_addr_valid(paddr);
+}
+
/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << PMD_BITS)
@@ -79,9 +98,12 @@
/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS 0
-#define pte_ERROR(e) __builtin_trap()
-#define pmd_ERROR(e) __builtin_trap()
-#define pgd_ERROR(e) __builtin_trap()
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
+ __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))
#endif /* !(__ASSEMBLY__) */
@@ -258,8 +280,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
unsigned long mask, tmp;
- /* SUN4U: 0x600307ffffffecb8 (negated == 0x9ffcf80000001347)
- * SUN4V: 0x30ffffffffffee17 (negated == 0xcf000000000011e8)
+ /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
+ * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
*
* Even if we use negation tricks the result is still a 6
* instruction sequence, so don't try to play fancy and just
@@ -289,10 +311,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
" .previous\n"
: "=r" (mask), "=r" (tmp)
: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
- _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U | _PAGE_PRESENT_4U |
+ _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
_PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
"i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
- _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V | _PAGE_PRESENT_4V |
+ _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
_PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -633,7 +655,7 @@ static inline unsigned long pmd_large(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
- return (pte_val(pte) & _PAGE_PMD_HUGE) && pte_present(pte);
+ return pte_val(pte) & _PAGE_PMD_HUGE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -719,20 +741,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return __pmd(pte_val(pte));
}
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
- unsigned long mask;
-
- if (tlb_type == hypervisor)
- mask = _PAGE_PRESENT_4V;
- else
- mask = _PAGE_PRESENT_4U;
-
- pmd_val(pmd) &= ~mask;
-
- return pmd;
-}
-
static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
@@ -757,6 +765,20 @@ static inline int pmd_present(pmd_t pmd)
#define pmd_none(pmd) (!pmd_val(pmd))
+/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
+ * very simple, it's just the physical address. PTE tables are of
+ * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
+ * the top bits outside of the range of any physical address size we
+ * support are clear as well. We also validate the physical address itself.
+ */
+#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) || \
+ !__kern_addr_valid(pmd_val(pmd)))
+
+#define pud_none(pud) (!pud_val(pud))
+
+#define pud_bad(pud) ((pud_val(pud) & ~PAGE_MASK) || \
+ !__kern_addr_valid(pud_val(pud)))
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
@@ -790,10 +812,7 @@ static inline unsigned long __pmd_page(pmd_t pmd)
#define pud_page_vaddr(pud) \
((unsigned long) __va(pud_val(pud)))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
-#define pmd_bad(pmd) (0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
-#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (0)
#define pud_present(pud) (pud_val(pud) != 0U)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
@@ -893,6 +912,10 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd);
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp);
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable);
@@ -919,18 +942,6 @@ extern unsigned long pte_file(pte_t);
extern pte_t pgoff_to_pte(unsigned long);
#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
-extern unsigned long sparc64_valid_addr_bitmap[];
-
-/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
-static inline bool kern_addr_valid(unsigned long addr)
-{
- unsigned long paddr = __pa(addr);
-
- if ((paddr >> 41UL) != 0UL)
- return false;
- return test_bit(paddr >> 22, sparc64_valid_addr_bitmap);
-}
-
extern int page_in_phys_avail(unsigned long paddr);
/*
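
__kern_addr_valid() above checks a physical address against the supported address width and then against a one-bit-per-4MB validity bitmap. A userspace sketch of the same bookkeeping, with an assumed 43-bit physical address width chosen purely for illustration:

#include <stdbool.h>
#include <stdio.h>

#define ILOG2_4MB     22
#define MAX_PHYS_BITS 43                   /* assumed physical address width */
#define NCHUNKS       (1UL << (MAX_PHYS_BITS - ILOG2_4MB))
#define LONG_BITS     (8 * sizeof(unsigned long))

static unsigned long valid_bitmap[NCHUNKS / LONG_BITS];

static void mark_valid(unsigned long paddr)
{
	unsigned long bit = paddr >> ILOG2_4MB;

	valid_bitmap[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

static bool kern_addr_valid(unsigned long paddr)
{
	unsigned long bit;

	if (paddr >> MAX_PHYS_BITS)        /* outside the supported range */
		return false;
	bit = paddr >> ILOG2_4MB;
	return valid_bitmap[bit / LONG_BITS] & (1UL << (bit % LONG_BITS));
}

int main(void)
{
	mark_valid(0x40000000UL);          /* mark the 4MB chunk at 1GB */
	printf("%d %d\n",
	       (int)kern_addr_valid(0x401fffffUL),   /* same chunk -> 1 */
	       (int)kern_addr_valid(0x80000000UL));  /* unmarked   -> 0 */
	return 0;
}
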
diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h
index 2230f80d9fe3..90916f955cac 100644
--- a/arch/sparc/include/asm/tsb.h
+++ b/arch/sparc/include/asm/tsb.h
@@ -171,7 +171,8 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
andcc REG1, REG2, %g0; \
be,pt %xcc, 700f; \
sethi %hi(4 * 1024 * 1024), REG2; \
- andn REG1, REG2, REG1; \
+ brgez,pn REG1, FAIL_LABEL; \
+ andn REG1, REG2, REG1; \
and VADDR, REG2, REG2; \
brlz,pt REG1, PTE_LABEL; \
or REG1, REG2, REG1; \
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 26b706a1867d..452f04fe8da6 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -282,8 +282,8 @@ sun4v_chip_type:
stx %l2, [%l4 + 0x0]
ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
/* 4MB align */
- srlx %l3, 22, %l3
- sllx %l3, 22, %l3
+ srlx %l3, ILOG2_4MB, %l3
+ sllx %l3, ILOG2_4MB, %l3
stx %l3, [%l4 + 0x8]
/* Leave service as-is, "call-method" */
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 542e96ac4d39..605d49204580 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -277,7 +277,7 @@ kvmap_dtlb_load:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
sub %g4, %g5, %g5
- srlx %g5, 22, %g5
+ srlx %g5, ILOG2_4MB, %g5
sethi %hi(vmemmap_table), %g1
sllx %g5, 3, %g5
or %g1, %lo(vmemmap_table), %g1
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 6479256fd5a4..337094556916 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -68,27 +68,16 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
static void die_nmi(const char *str, struct pt_regs *regs, int do_panic)
{
+ int this_cpu = smp_processor_id();
+
if (notify_die(DIE_NMIWATCHDOG, str, regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
return;
- console_verbose();
- bust_spinlocks(1);
-
- printk(KERN_EMERG "%s", str);
- printk(" on CPU%d, ip %08lx, registers:\n",
- smp_processor_id(), regs->tpc);
- show_regs(regs);
- dump_stack();
-
- bust_spinlocks(0);
-
if (do_panic || panic_on_oops)
- panic("Non maskable interrupt");
-
- nmi_exit();
- local_irq_enable();
- do_exit(SIGBUS);
+ panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ else
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
}
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 9781048161ab..745a3633ce14 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -149,7 +149,7 @@ void cpu_panic(void)
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
go[MASTER] = 0;
membar_safe("#StoreLoad");
- spin_lock_irqsave(&itc_sync_lock, flags);
+ raw_spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
membar_safe("#StoreLoad");
}
}
- spin_unlock_irqrestore(&itc_sync_lock, flags);
+ raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index f7c72b6efc27..d066eb18650c 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -44,7 +44,7 @@ SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
SIGN1(sys32_select, compat_sys_select, %o0)
-SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
+SIGN1(sys32_futex, compat_sys_futex, %o1)
SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index a364000ca1aa..7f41d40b7e6e 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -151,7 +151,7 @@ static ssize_t store_mmustat_enable(struct device *s,
size_t count)
{
unsigned long val, err;
- int ret = sscanf(buf, "%ld", &val);
+ int ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return -EINVAL;
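
The one-character change above matters because the variable being parsed is an unsigned long. A quick userspace illustration of the "%lu" versus "%ld" distinction (illustrative only, not the sysfs handler itself):

#include <stdio.h>

int main(void)
{
	unsigned long val = 0;

	/* Correct: conversion and argument type agree. */
	if (sscanf("1", "%lu", &val) == 1)
		printf("parsed %lu\n", val);

	/* "%lu" still accepts a leading '-' (it parses like strtoul), so a
	 * negative string wraps to a huge value; callers that only want 0/1
	 * must range-check the result either way. */
	if (sscanf("-1", "%lu", &val) == 1)
		printf("wrapped to %lu\n", val);

	return 0;
}
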
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 3c1a7cb31579..35ab8b60d256 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -166,17 +166,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
unsigned long compute_effective_address(struct pt_regs *regs,
unsigned int insn, unsigned int rd)
{
+ int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned int rs1 = (insn >> 14) & 0x1f;
unsigned int rs2 = insn & 0x1f;
- int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
+ unsigned long addr;
if (insn & 0x2000) {
maybe_flush_windows(rs1, 0, rd, from_kernel);
- return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+ addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
} else {
maybe_flush_windows(rs1, rs2, rd, from_kernel);
- return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
}
+
+ if (!from_kernel && test_thread_flag(TIF_32BIT))
+ addr &= 0xffffffff;
+
+ return addr;
}
/* This is just to make gcc think die_if_kernel does return... */
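
The masking added to compute_effective_address() above can be illustrated in isolation; the sketch below uses made-up operand values and is not the sparc code itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t effective_address(uint64_t rs1, uint64_t rs2,
				  bool from_kernel, bool task_is_32bit)
{
	uint64_t addr = rs1 + rs2;             /* reg + reg (or reg + simm13) */

	if (!from_kernel && task_is_32bit)
		addr &= 0xffffffffULL;          /* compat tasks wrap at 4GB */
	return addr;
}

int main(void)
{
	/* 32-bit task: 0xfffffff8 + 0x10 wraps to 0x8, not 0x100000008. */
	printf("0x%llx\n",
	       (unsigned long long)effective_address(0xfffffff8ULL, 0x10ULL,
						     false, true));
	return 0;
}
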
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 2c20ad63ddbf..30eee6e8a81b 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -236,6 +236,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
*/
VISEntryHalf
+ membar #Sync
alignaddr %o1, %g0, %g0
add %o1, (64 - 1), %o4
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 69bb818fdd79..4ced3fc66130 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -96,38 +96,51 @@ static unsigned int get_user_insn(unsigned long tpc)
pte_t *ptep, pte;
unsigned long pa;
u32 insn = 0;
- unsigned long pstate;
- if (pgd_none(*pgdp))
- goto outret;
+ if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+ goto out;
pudp = pud_offset(pgdp, tpc);
- if (pud_none(*pudp))
- goto outret;
- pmdp = pmd_offset(pudp, tpc);
- if (pmd_none(*pmdp))
- goto outret;
+ if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
+ goto out;
/* This disables preemption for us as well. */
- __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
- __asm__ __volatile__("wrpr %0, %1, %%pstate"
- : : "r" (pstate), "i" (PSTATE_IE));
- ptep = pte_offset_map(pmdp, tpc);
- pte = *ptep;
- if (!pte_present(pte))
- goto out;
+ local_irq_disable();
+
+ pmdp = pmd_offset(pudp, tpc);
+ if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+ goto out_irq_enable;
- pa = (pte_pfn(pte) << PAGE_SHIFT);
- pa += (tpc & ~PAGE_MASK);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge(*pmdp)) {
+ if (pmd_trans_splitting(*pmdp))
+ goto out_irq_enable;
- /* Use phys bypass so we don't pollute dtlb/dcache. */
- __asm__ __volatile__("lduwa [%1] %2, %0"
- : "=r" (insn)
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
+ pa = pmd_pfn(*pmdp) << PAGE_SHIFT;
+ pa += tpc & ~HPAGE_MASK;
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
+ __asm__ __volatile__("lduwa [%1] %2, %0"
+ : "=r" (insn)
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
+ } else
+#endif
+ {
+ ptep = pte_offset_map(pmdp, tpc);
+ pte = *ptep;
+ if (pte_present(pte)) {
+ pa = (pte_pfn(pte) << PAGE_SHIFT);
+ pa += (tpc & ~PAGE_MASK);
+
+ /* Use phys bypass so we don't pollute dtlb/dcache. */
+ __asm__ __volatile__("lduwa [%1] %2, %0"
+ : "=r" (insn)
+ : "r" (pa), "i" (ASI_PHYS_USE_EC));
+ }
+ pte_unmap(ptep);
+ }
+out_irq_enable:
+ local_irq_enable();
out:
- pte_unmap(ptep);
- __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-outret:
return insn;
}
@@ -153,7 +166,8 @@ show_signal_msg(struct pt_regs *regs, int sig, int code,
}
static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
- unsigned int insn, int fault_code)
+ unsigned long fault_addr, unsigned int insn,
+ int fault_code)
{
unsigned long addr;
siginfo_t info;
@@ -161,10 +175,18 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
info.si_code = code;
info.si_signo = sig;
info.si_errno = 0;
- if (fault_code & FAULT_CODE_ITLB)
+ if (fault_code & FAULT_CODE_ITLB) {
addr = regs->tpc;
- else
- addr = compute_effective_address(regs, insn, 0);
+ } else {
+ /* If we were able to probe the faulting instruction, use it
+ * to compute a precise fault address. Otherwise use the fault-time
+ * provided address, which may only have page granularity.
+ */
+ if (insn)
+ addr = compute_effective_address(regs, insn, 0);
+ else
+ addr = fault_addr;
+ }
info.si_addr = (void __user *) addr;
info.si_trapno = 0;
@@ -239,7 +261,7 @@ static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
/* The si_code was set to make clear whether
* this was a SEGV_MAPERR or SEGV_ACCERR fault.
*/
- do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
+ do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
return;
}
@@ -259,18 +281,6 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
show_regs(regs);
}
-static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
- unsigned long addr)
-{
- static int times;
-
- if (times++ < 10)
- printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
- "reports 64-bit fault address [%lx]\n",
- current->comm, current->pid, addr);
- show_regs(regs);
-}
-
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
@@ -300,10 +310,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
goto intr_or_no_mm;
}
}
- if (unlikely((address >> 32) != 0)) {
- bogus_32bit_fault_address(regs, address);
+ if (unlikely((address >> 32) != 0))
goto intr_or_no_mm;
- }
}
if (regs->tstate & TSTATE_PRIV) {
@@ -525,7 +533,7 @@ do_sigbus:
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
- do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
+ do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index c4d3da68b800..1aed0432c64b 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -73,7 +73,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
struct page *head, *page, *tail;
int refs;
- if (!pmd_large(pmd))
+ if (!(pmd_val(pmd) & _PAGE_VALID))
return 0;
if (write && !pmd_write(pmd))
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index eafbc65c9c47..ed3c969a5f4c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -588,7 +588,7 @@ static void __init remap_kernel(void)
int i, tlb_ent = sparc64_highest_locked_tlbent();
tte_vaddr = (unsigned long) KERNBASE;
- phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
tte_data = kern_large_tte(phys_page);
kern_locked_tte_data = tte_data;
@@ -1881,7 +1881,7 @@ void __init paging_init(void)
BUILD_BUG_ON(NR_CPUS > 4096);
- kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
/* Invalidate both kernel TSBs. */
@@ -1937,7 +1937,7 @@ void __init paging_init(void)
shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
real_end = (unsigned long)_end;
- num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+ num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
num_kernel_image_mappings);
@@ -2094,7 +2094,7 @@ static void __init setup_valid_addr_bitmap_from_pavail(unsigned long *bitmap)
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
- set_bit(old_start >> 22, bitmap);
+ set_bit(old_start >> ILOG2_4MB, bitmap);
goto do_next_page;
}
}
@@ -2143,7 +2143,7 @@ void __init mem_init(void)
addr = PAGE_OFFSET + kern_base;
last = PAGE_ALIGN(kern_size) + addr;
while (addr < last) {
- set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
+ set_bit(__pa(addr) >> ILOG2_4MB, sparc64_valid_addr_bitmap);
addr += PAGE_SIZE;
}
@@ -2267,7 +2267,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
void *block;
if (!(*vmem_pp & _PAGE_VALID)) {
- block = vmemmap_alloc_block(1UL << 22, node);
+ block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
if (!block)
return -ENOMEM;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b12cb5e72812..b89aba217e3b 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -134,7 +134,7 @@ no_cache_flush:
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
- pmd_t pmd, bool exec)
+ pmd_t pmd)
{
unsigned long end;
pte_t *pte;
@@ -142,8 +142,11 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pte = pte_offset_map(&pmd, vaddr);
end = vaddr + HPAGE_SIZE;
while (vaddr < end) {
- if (pte_val(*pte) & _PAGE_VALID)
+ if (pte_val(*pte) & _PAGE_VALID) {
+ bool exec = pte_exec(*pte);
+
tlb_batch_add_one(mm, vaddr, exec);
+ }
pte++;
vaddr += PAGE_SIZE;
}
@@ -177,19 +180,30 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
if (!pmd_none(orig)) {
- pte_t orig_pte = __pte(pmd_val(orig));
- bool exec = pte_exec(orig_pte);
-
addr &= HPAGE_MASK;
if (pmd_trans_huge(orig)) {
+ pte_t orig_pte = __pte(pmd_val(orig));
+ bool exec = pte_exec(orig_pte);
+
tlb_batch_add_one(mm, addr, exec);
tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
} else {
- tlb_batch_pmd_scan(mm, addr, orig, exec);
+ tlb_batch_pmd_scan(mm, addr, orig);
}
}
}
+void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
+{
+ pmd_t entry = *pmdp;
+
+ pmd_val(entry) &= ~_PAGE_VALID;
+
+ set_pmd_at(vma->vm_mm, address, pmdp, entry);
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+}
+
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pgtable)
{
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index f5d506fdddad..fe19b81acc09 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -133,7 +133,19 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
mm->context.tsb_block[tsb_idx].tsb_nentries =
tsb_bytes / sizeof(struct tsb);
- base = TSBMAP_BASE;
+ switch (tsb_idx) {
+ case MM_TSB_BASE:
+ base = TSBMAP_8K_BASE;
+ break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+ case MM_TSB_HUGE:
+ base = TSBMAP_4M_BASE;
+ break;
+#endif
+ default:
+ BUG();
+ }
+
tte = pgprot_val(PAGE_KERNEL_LOCKED);
tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index ce6ad7e6a7d7..33f71b01fd22 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -79,6 +79,7 @@ else
UTS_MACHINE := x86_64
CHECKFLAGS += -D__x86_64__ -m64
+ biarch := -m64
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index abb9eba61b50..dbe8dd2fe247 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -71,7 +71,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) . \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|_end\)$$/\#define VO_\2 0x\1/p'
quiet_cmd_voffset = VOFFSET $@
cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
@@ -80,7 +80,7 @@ targets += voffset.h
$(obj)/voffset.h: vmlinux FORCE
$(call if_changed,voffset)
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 17684615374b..57ab74df7eea 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -354,7 +354,7 @@ static void parse_elf(void *output)
free(phdrs);
}
-asmlinkage void *decompress_kernel(void *rmode, memptr heap,
+asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index b18df579c0e9..36f7125945e3 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -63,6 +63,7 @@
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
+extern int boot_hpet_disable;
extern u8 hpet_blockid;
extern int hpet_force_user;
extern u8 hpet_msi_disable;
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index a8091216963b..68c05398bba9 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -52,6 +52,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
+ ptep_clear_flush(vma, addr, ptep);
}
static inline int huge_pte_none(pte_t pte)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index c827ace3121b..fcf2b3ae1bf0 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -384,7 +384,7 @@
#define MSR_IA32_MISC_ENABLE_MWAIT_BIT 18
#define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT 22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT 23
#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT 34
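
The dropped semicolon above is the whole fix: a trailing ';' inside an object-like macro breaks any use of the constant in an expression. A minimal illustration (constants here are just for the example):

#include <stdio.h>

#define GOOD_FLAG (1ULL << 22)
/* #define BAD_FLAG (1ULL << 22);   <- the ';' would break the test below */

int main(void)
{
	unsigned long long misc_enable = 0;

	if (misc_enable & GOOD_FLAG)   /* with BAD_FLAG this would not compile */
		printf("limit-cpuid set\n");
	else
		printf("limit-cpuid clear\n");
	return 0;
}
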
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 3a2ae4c88948..31368207837c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -31,7 +31,7 @@ static char temp_stack[4096];
*
* Wrapper around acpi_enter_sleep_state() to be called by assembly.
*/
-acpi_status asmlinkage x86_acpi_enter_sleep_state(u8 state)
+acpi_status asmlinkage __visible x86_acpi_enter_sleep_state(u8 state)
{
return acpi_enter_sleep_state(state);
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d23aa82e7a7b..992060e09897 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2189,7 +2189,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
cfg->move_in_progress = 0;
}
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d921b7ee6595..36a1bb6d1ee0 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -429,14 +429,14 @@ static inline void __smp_thermal_interrupt(void)
smp_thermal_vector();
}
-asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_thermal_interrupt();
exiting_ack_irq();
}
-asmlinkage void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index fe6b1c86645b..7245980186ee 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -24,14 +24,14 @@ static inline void __smp_threshold_interrupt(void)
mce_threshold_vector();
}
-asmlinkage void smp_threshold_interrupt(void)
+asmlinkage __visible void smp_threshold_interrupt(void)
{
entering_irq();
__smp_threshold_interrupt();
exiting_ack_irq();
}
-asmlinkage void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void smp_trace_threshold_interrupt(void)
{
entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index aa333d966886..adb02aa62af5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -169,7 +169,6 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
- FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
EVENT_CONSTRAINT_END
};
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 384df5105fbc..136ac74dee82 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -27,6 +27,7 @@
static int __init x86_rdrand_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_RDRAND);
+ setup_clear_cpu_cap(X86_FEATURE_RDSEED);
return 1;
}
__setup("nordrand", x86_rdrand_setup);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 6e2537c32190..6cda0baeac9d 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -17,6 +17,7 @@
#include <asm/dma.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
+#include <asm/hpet.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/irq_remapping.h>
@@ -530,6 +531,15 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
}
}
+static void __init force_disable_hpet(int num, int slot, int func)
+{
+#ifdef CONFIG_HPET_TIMER
+ boot_hpet_disable = 1;
+ pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
+#endif
+}
+
+
#define QFLAG_APPLY_ONCE 0x1
#define QFLAG_APPLIED 0x2
#define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED)
@@ -567,6 +577,12 @@ static struct chipset early_qrk[] __initdata = {
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
QFLAG_APPLY_ONCE, intel_graphics_stolen },
+ /*
+ * HPET on the current version of the Baytrail platform has accuracy
+ * problems; disable it for now:
+ */
+ { PCI_VENDOR_ID_INTEL, 0x0f00,
+ PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
{}
};
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index c61a14a4a310..d6c1b9836995 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
reserve_ebda_region();
}
-asmlinkage void __init i386_start_kernel(void)
+asmlinkage __visible void __init i386_start_kernel(void)
{
sanitize_boot_params(&boot_params);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 85126ccbdf6b..068054f4bf20 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
}
}
-asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
int i;
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 8d80ae011603..4177bfbc80b0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -88,7 +88,7 @@ static inline void hpet_clear_mapping(void)
/*
* HPET command line enable / disable
*/
-static int boot_hpet_disable;
+int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index af1d14a9ebda..dcbbaa165bde 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -20,6 +20,8 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
+int sysctl_ldt16 = 0;
+
#ifdef CONFIG_SMP
static void flush_ldt(void *current_mm)
{
@@ -234,7 +236,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
* IRET leaking the high bits of the kernel stack address.
*/
#ifdef CONFIG_X86_64
- if (!ldt_info.seg_32bit) {
+ if (!ldt_info.seg_32bit && !sysctl_ldt16) {
error = -EINVAL;
goto out_unlock;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9c0280f93d05..898d077617a9 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
asmlinkage extern void ret_from_fork(void);
-asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
+__visible DEFINE_PER_CPU(unsigned long, old_rsp);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3399d3a99730..52b1157c53eb 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -191,6 +191,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
+ /* Certec */
+ { /* Handle problems with rebooting on Certec BPC600 */
+ .callback = set_pci_reboot,
+ .ident = "Certec BPC600",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Certec"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "BPC600"),
+ },
+ },
+
/* Dell */
{ /* Handle problems with rebooting on Dell DXP061 */
.callback = set_bios_reboot,
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 7c3a5a61f2e4..be8e1bde07aa 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -168,7 +168,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
* this function calls the 'stop' function on all other CPUs in the system.
*/
-asmlinkage void smp_reboot_interrupt(void)
+asmlinkage __visible void smp_reboot_interrupt(void)
{
ack_APIC_irq();
irq_enter();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 57409f6b8c62..f73b5d435bdc 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -357,7 +357,7 @@ exit:
* for scheduling or signal handling. The actual stack switch is done in
* entry.S
*/
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage __visible __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
struct pt_regs *regs = eregs;
/* Did already sync */
@@ -601,11 +601,11 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
#endif
}
-asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
-asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
+asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 5edc34b5b951..b99b9ad8540c 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -36,7 +36,7 @@ static int irq_routing_comply = 1;
* and vice versa.
*/
-asmlinkage unsigned long vsmp_save_fl(void)
+asmlinkage __visible unsigned long vsmp_save_fl(void)
{
unsigned long flags = native_save_fl();
@@ -56,7 +56,7 @@ __visible void vsmp_restore_fl(unsigned long flags)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-asmlinkage void vsmp_irq_disable(void)
+asmlinkage __visible void vsmp_irq_disable(void)
{
unsigned long flags = native_save_fl();
@@ -64,7 +64,7 @@ asmlinkage void vsmp_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-asmlinkage void vsmp_irq_enable(void)
+asmlinkage __visible void vsmp_irq_enable(void)
{
unsigned long flags = native_save_fl();
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index f9c6e56e14b5..9531fbb123ba 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -43,7 +43,7 @@ void update_vsyscall(struct timekeeper *tk)
vdata->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_snsec = tk->xtime_nsec
- + (tk->wall_to_monotonic.tv_nsec
+ + ((u64)tk->wall_to_monotonic.tv_nsec
<< tk->shift);
while (vdata->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->shift)) {
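
The (u64) cast above forces the left shift to be done in 64-bit arithmetic before the addition. A generic illustration of the hazard, using a 32-bit value so the truncation is visible regardless of how wide tv_nsec happens to be on a given configuration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nsec  = 999999999u;   /* roughly a tv_nsec-sized value */
	unsigned shift = 10;

	uint64_t shifted_then_widened = (uint64_t)(nsec << shift); /* truncated */
	uint64_t widened_then_shifted = (uint64_t)nsec << shift;   /* correct   */

	printf("%llu vs %llu\n",
	       (unsigned long long)shifted_then_widened,
	       (unsigned long long)widened_then_shifted);
	return 0;
}
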
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8b8fc0b792ba..b6c0bacca9bd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -280,7 +280,7 @@ int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
-asmlinkage void kvm_spurious_fault(void)
+asmlinkage __visible void kvm_spurious_fault(void)
{
/* Fault while not rebooting. We want the trace. */
BUG();
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index ad1fb5f53925..aae94132bc24 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
* flags word contains all kind of stuff, but in practice Linux only cares
* about the interrupt flag. Our "save_flags()" just returns that.
*/
-asmlinkage unsigned long lguest_save_fl(void)
+asmlinkage __visible unsigned long lguest_save_fl(void)
{
return lguest_data.irq_enabled;
}
/* Interrupts go off... */
-asmlinkage void lguest_irq_disable(void)
+asmlinkage __visible void lguest_irq_disable(void)
{
lguest_data.irq_enabled = 0;
}
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index db9db446b71a..43623739c7cf 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -76,7 +76,7 @@ static inline int __flip_bit(u32 msr, u8 bit, bool set)
if (m1.q == m.q)
return 0;
- err = msr_write(msr, &m);
+ err = msr_write(msr, &m1);
if (err)
return err;
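
The fix above is a classic read-modify-write slip: the modified copy (m1) must be written back, not the original snapshot (m). A userspace sketch with a fake in-memory "MSR"; the names here are illustrative, not the kernel's msr library:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr;                    /* stands in for the hardware MSR */

static int msr_read(uint64_t *val)  { *val = fake_msr; return 0; }
static int msr_write(uint64_t val)  { fake_msr = val;  return 0; }

static int flip_bit(unsigned int bit, bool set)
{
	uint64_t m, m1;

	if (msr_read(&m))
		return -1;

	m1 = m;
	if (set)
		m1 |=  1ULL << bit;
	else
		m1 &= ~(1ULL << bit);

	if (m1 == m)
		return 0;              /* nothing changed, skip the write */

	return msr_write(m1);          /* the fix: write m1, not m */
}

int main(void)
{
	flip_bit(3, true);
	printf("msr = 0x%llx\n", (unsigned long long)fake_msr);   /* 0x8 */
	return 0;
}
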
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index a5449089cd9f..9e6545f269e5 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -302,7 +302,7 @@ static struct {
0x242 in div_Xsig.S
*/
-asmlinkage void FPU_exception(int n)
+asmlinkage __visible void FPU_exception(int n)
{
int i, int_type;
@@ -492,7 +492,7 @@ int real_2op_NaN(FPU_REG const *b, u_char tagb,
/* Invalid arith operation on Valid registers */
/* Returns < 0 if the exception is unmasked */
-asmlinkage int arith_invalid(int deststnr)
+asmlinkage __visible int arith_invalid(int deststnr)
{
EXCEPTION(EX_Invalid);
@@ -507,7 +507,7 @@ asmlinkage int arith_invalid(int deststnr)
}
/* Divide a finite number by zero */
-asmlinkage int FPU_divide_by_zero(int deststnr, u_char sign)
+asmlinkage __visible int FPU_divide_by_zero(int deststnr, u_char sign)
{
FPU_REG *dest = &st(deststnr);
int tag = TAG_Valid;
@@ -539,7 +539,7 @@ int set_precision_flag(int flags)
}
/* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_up(void)
+asmlinkage __visible void set_precision_flag_up(void)
{
if (control_word & CW_Precision)
partial_status |= (SW_Precision | SW_C1); /* The masked response */
@@ -548,7 +548,7 @@ asmlinkage void set_precision_flag_up(void)
}
/* This may be called often, so keep it lean */
-asmlinkage void set_precision_flag_down(void)
+asmlinkage __visible void set_precision_flag_down(void)
{
if (control_word & CW_Precision) { /* The masked response */
partial_status &= ~SW_C1;
@@ -557,7 +557,7 @@ asmlinkage void set_precision_flag_down(void)
EXCEPTION(EX_Precision);
}
-asmlinkage int denormal_operand(void)
+asmlinkage __visible int denormal_operand(void)
{
if (control_word & CW_Denormal) { /* The masked response */
partial_status |= SW_Denorm_Op;
@@ -568,7 +568,7 @@ asmlinkage int denormal_operand(void)
}
}
-asmlinkage int arith_overflow(FPU_REG *dest)
+asmlinkage __visible int arith_overflow(FPU_REG *dest)
{
int tag = TAG_Valid;
@@ -596,7 +596,7 @@ asmlinkage int arith_overflow(FPU_REG *dest)
}
-asmlinkage int arith_underflow(FPU_REG *dest)
+asmlinkage __visible int arith_underflow(FPU_REG *dest)
{
int tag = TAG_Valid;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 92aef8fdac2f..080f3f071bb0 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -135,7 +135,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
header->pages = sz / PAGE_SIZE;
- hole = sz - (proglen + sizeof(*header));
+ hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
/* insert a random number of int3 instructions before BPF code */
*image_ptr = &header->image[prandom_u32() % hole];
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 81b506d5befd..524142117296 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -14,48 +14,92 @@
static const struct font_desc *font;
static u32 efi_x, efi_y;
+static void *efi_fb;
+static bool early_efi_keep;
-static __init void early_efi_clear_scanline(unsigned int y)
+/*
+ * EFI earlyprintk needs early_ioremap to map the framebuffer.
+ * But early_ioremap is not usable for earlyprintk=efi,keep; ioremap should
+ * be used instead. ioremap becomes available after paging_init(), which is
+ * earlier than the initcall callbacks, so this early initcall,
+ * early_efi_map_fb, maps the whole EFI framebuffer.
+ */
+static __init int early_efi_map_fb(void)
{
- unsigned long base, *dst;
- u16 len;
+ unsigned long base, size;
+
+ if (!early_efi_keep)
+ return 0;
base = boot_params.screen_info.lfb_base;
- len = boot_params.screen_info.lfb_linelength;
+ size = boot_params.screen_info.lfb_size;
+ efi_fb = ioremap(base, size);
+
+ return efi_fb ? 0 : -ENOMEM;
+}
+early_initcall(early_efi_map_fb);
+
+/*
+ * early_efi_map maps the EFI framebuffer region [start, start + len - 1].
+ * In the earlyprintk=efi,keep case the whole framebuffer is already mapped,
+ * so just return the offset efi_fb + start.
+ */
+static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
+{
+ unsigned long base;
+
+ base = boot_params.screen_info.lfb_base;
+
+ if (efi_fb)
+ return (efi_fb + start);
+ else
+ return early_ioremap(base + start, len);
+}
- dst = early_ioremap(base + y*len, len);
+static __init_refok void early_efi_unmap(void *addr, unsigned long len)
+{
+ if (!efi_fb)
+ early_iounmap(addr, len);
+}
+
+static void early_efi_clear_scanline(unsigned int y)
+{
+ unsigned long *dst;
+ u16 len;
+
+ len = boot_params.screen_info.lfb_linelength;
+ dst = early_efi_map(y*len, len);
if (!dst)
return;
memset(dst, 0, len);
- early_iounmap(dst, len);
+ early_efi_unmap(dst, len);
}
-static __init void early_efi_scroll_up(void)
+static void early_efi_scroll_up(void)
{
- unsigned long base, *dst, *src;
+ unsigned long *dst, *src;
u16 len;
u32 i, height;
- base = boot_params.screen_info.lfb_base;
len = boot_params.screen_info.lfb_linelength;
height = boot_params.screen_info.lfb_height;
for (i = 0; i < height - font->height; i++) {
- dst = early_ioremap(base + i*len, len);
+ dst = early_efi_map(i*len, len);
if (!dst)
return;
- src = early_ioremap(base + (i + font->height) * len, len);
+ src = early_efi_map((i + font->height) * len, len);
if (!src) {
- early_iounmap(dst, len);
+ early_efi_unmap(dst, len);
return;
}
memmove(dst, src, len);
- early_iounmap(src, len);
- early_iounmap(dst, len);
+ early_efi_unmap(src, len);
+ early_efi_unmap(dst, len);
}
}
@@ -79,16 +123,14 @@ static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h)
}
}
-static __init void
+static void
early_efi_write(struct console *con, const char *str, unsigned int num)
{
struct screen_info *si;
- unsigned long base;
unsigned int len;
const char *s;
void *dst;
- base = boot_params.screen_info.lfb_base;
si = &boot_params.screen_info;
len = si->lfb_linelength;
@@ -109,7 +151,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
for (h = 0; h < font->height; h++) {
unsigned int n, x;
- dst = early_ioremap(base + (efi_y + h) * len, len);
+ dst = early_efi_map((efi_y + h) * len, len);
if (!dst)
return;
@@ -123,7 +165,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
s++;
}
- early_iounmap(dst, len);
+ early_efi_unmap(dst, len);
}
num -= count;
@@ -179,6 +221,9 @@ static __init int early_efi_setup(struct console *con, char *options)
for (i = 0; i < (yres - efi_y) / font->height; i++)
early_efi_scroll_up();
+ /* early_console_register will unset CON_BOOT in case ,keep */
+ if (!(con->flags & CON_BOOT))
+ early_efi_keep = true;
return 0;
}
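
The restructuring above boils down to a choice between one persistent mapping of the whole framebuffer (earlyprintk=efi,keep) and a short-lived mapping per scanline. A rough userspace analogue using mmap() of a temporary file in place of ioremap()/early_ioremap(); the file path, sizes, and function names are made up for the sketch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define FB_SIZE  (64UL * 4096UL)   /* illustrative framebuffer size */
#define LINE_LEN 4096UL            /* one "scanline", page aligned here */

static int   fb_fd = -1;           /* stands in for the physical framebuffer */
static void *whole_fb;             /* non-NULL in "keep" mode */

static void *fb_map(size_t off, size_t len)
{
	if (whole_fb)
		return (char *)whole_fb + off;              /* persistent map */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,      /* per-use map    */
		    MAP_SHARED, fb_fd, (off_t)off);
}

static void fb_unmap(void *addr, size_t len)
{
	if (!whole_fb)
		munmap(addr, len);
}

int main(void)
{
	fb_fd = open("/tmp/fb.bin", O_RDWR | O_CREAT, 0600);
	if (fb_fd < 0 || ftruncate(fb_fd, FB_SIZE) < 0)
		return 1;

	/* "keep" mode: map everything once, as early_efi_map_fb() does. */
	whole_fb = mmap(NULL, FB_SIZE, PROT_READ | PROT_WRITE,
			MAP_SHARED, fb_fd, 0);
	if (whole_fb == MAP_FAILED)
		return 1;

	void *line = fb_map(LINE_LEN, LINE_LEN);   /* clear "scanline" 1 */
	memset(line, 0, LINE_LEN);
	fb_unmap(line, LINE_LEN);

	printf("scanline cleared\n");
	return 0;
}
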
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index ff0174dda810..a9acde72d4ed 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -75,7 +75,7 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
return 0;
}
-asmlinkage int xo1_do_sleep(u8 sleep_state)
+asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
{
void *pgd_addr = __va(read_cr3());
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 304fca20d96e..35e2bb6c0f37 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -23,7 +23,7 @@
extern __visible const void __nosave_begin, __nosave_end;
/* Defined in hibernate_asm_64.S */
-extern asmlinkage int restore_image(void);
+extern asmlinkage __visible int restore_image(void);
/*
* Address to jump to in the last phase of restore in order to get to the image
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 00348980a3a6..e1f220e3ca68 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -39,6 +39,7 @@
#ifdef CONFIG_X86_64
#define vdso_enabled sysctl_vsyscall32
#define arch_setup_additional_pages syscall32_setup_pages
+extern int sysctl_ldt16;
#endif
/*
@@ -249,6 +250,13 @@ static struct ctl_table abi_table2[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "ldt16",
+ .data = &sysctl_ldt16,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
{}
};
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 201d09a7c46b..c34bfc4bbe7f 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1515,7 +1515,7 @@ static void __init xen_pvh_early_guest_init(void)
}
/* First C function to be called on Xen boot */
-asmlinkage void __init xen_start_kernel(void)
+asmlinkage __visible void __init xen_start_kernel(void)
{
struct physdev_set_iopl set_iopl;
int rc;
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 08f763de26fe..a1207cb6472a 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -23,7 +23,7 @@ void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
-asmlinkage unsigned long xen_save_fl(void)
+asmlinkage __visible unsigned long xen_save_fl(void)
{
struct vcpu_info *vcpu;
unsigned long flags;
@@ -63,7 +63,7 @@ __visible void xen_restore_fl(unsigned long flags)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
-asmlinkage void xen_irq_disable(void)
+asmlinkage __visible void xen_irq_disable(void)
{
/* There's a one instruction preempt window here. We need to
make sure we're don't switch CPUs between getting the vcpu
@@ -74,7 +74,7 @@ asmlinkage void xen_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
-asmlinkage void xen_irq_enable(void)
+asmlinkage __visible void xen_irq_enable(void)
{
struct vcpu_info *vcpu;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e4a4145926f6..1039fb9ff5f5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct blkcg_gq *blkg;
int i;
- mutex_lock(&blkcg_pol_mutex);
+ /*
+ * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
+ * which ends up putting cgroup's internal cgroup_tree_mutex under
+ * it; however, cgroup_tree_mutex is nested above cgroup file
+ * active protection and grabbing blkcg_pol_mutex from a cgroup
+ * file operation creates a possible circular dependency. cgroup
+ * internal locking is planned to go through further simplification
+ * and this issue should go away soon. For now, let's trylock
+ * blkcg_pol_mutex and restart the write on failure.
+ *
+ * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
+ */
+ if (!mutex_trylock(&blkcg_pol_mutex))
+ return restart_syscall();
spin_lock_irq(&blkcg->lock);
/*
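
The trylock-and-restart above sidesteps a lock-ordering inversion by refusing to block. A userspace sketch of the same pattern with a pthread mutex, where -EAGAIN stands in for restart_syscall() and the caller simply retries:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pol_mutex = PTHREAD_MUTEX_INITIALIZER;

static int reset_stats(void)
{
	if (pthread_mutex_trylock(&pol_mutex) != 0)
		return -EAGAIN;            /* tell the caller to retry */

	/* ... reset the per-policy statistics under the lock ... */

	pthread_mutex_unlock(&pol_mutex);
	return 0;
}

int main(void)
{
	int ret;

	do {
		ret = reset_stats();       /* crude stand-in for restart_syscall() */
	} while (ret == -EAGAIN);

	printf("reset done: %d\n", ret);
	return 0;
}
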
diff --git a/drivers/Makefile b/drivers/Makefile
index d05d81b19b50..7183b6af5dac 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -119,7 +119,7 @@ obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
-obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/
+obj-$(CONFIG_ARCH_SHMOBILE) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ab686b310100..a34a22841002 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -47,6 +47,23 @@ config ACPI_SLEEP
depends on SUSPEND || HIBERNATION
default y
+config ACPI_PROCFS_POWER
+ bool "Deprecated power /proc/acpi directories"
+ depends on PROC_FS
+ help
+ For backwards compatibility, this option allows
+ deprecated power /proc/acpi/ directories to exist, even when
+ they have been replaced by functions in /sys.
+ The deprecated directories (and their replacements) include:
+ /proc/acpi/battery/* (/sys/class/power_supply/*)
+ /proc/acpi/ac_adapter/* (/sys/class/power_supply/*)
+ This option has no effect on /proc/acpi/ directories
+ and functions, which do not yet exist in /sys.
+ This option, together with the proc directories, will be
+ deleted in the future.
+
+ Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
config ACPI_EC_DEBUGFS
tristate "EC read/write access through /sys/kernel/debug/ec"
default n
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 0331f91d56e6..bce34afadcd0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -47,6 +47,7 @@ acpi-y += sysfs.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
+acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
ifdef CONFIG_ACPI_VIDEO
acpi-y += video_detect.o
endif
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 2c01c1da29ce..c67f6f5ad611 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
+static int acpi_ac_add(struct acpi_device *device);
+static int acpi_ac_remove(struct acpi_device *device);
+static void acpi_ac_notify(struct acpi_device *device, u32 event);
+
+static const struct acpi_device_id ac_device_ids[] = {
+ {"ACPI0003", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int acpi_ac_resume(struct device *dev);
+#endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
static int ac_sleep_before_get_state_ms;
+static struct acpi_driver acpi_ac_driver = {
+ .name = "ac",
+ .class = ACPI_AC_CLASS,
+ .ids = ac_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = acpi_ac_add,
+ .remove = acpi_ac_remove,
+ .notify = acpi_ac_notify,
+ },
+ .drv.pm = &acpi_ac_pm,
+};
+
struct acpi_ac {
struct power_supply charger;
- struct platform_device *pdev;
+ struct acpi_device * device;
unsigned long long state;
struct notifier_block battery_nb;
};
@@ -69,10 +97,12 @@ struct acpi_ac {
static int acpi_ac_get_state(struct acpi_ac *ac)
{
- acpi_status status;
- acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev);
+ acpi_status status = AE_OK;
+
+ if (!ac)
+ return -EINVAL;
- status = acpi_evaluate_integer(handle, "_PSR", NULL,
+ status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
@@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = {
Driver Model
-------------------------------------------------------------------------- */
-static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
+static void acpi_ac_notify(struct acpi_device *device, u32 event)
{
- struct acpi_ac *ac = data;
- struct acpi_device *adev;
+ struct acpi_ac *ac = acpi_driver_data(device);
if (!ac)
return;
@@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
- adev = ACPI_COMPANION(&ac->pdev->dev);
- acpi_bus_generate_netlink_event(adev->pnp.device_class,
- dev_name(&ac->pdev->dev),
- event, (u32) ac->state);
- acpi_notifier_call_chain(adev, event, (u32) ac->state);
+ acpi_bus_generate_netlink_event(device->pnp.device_class,
+ dev_name(&device->dev), event,
+ (u32) ac->state);
+ acpi_notifier_call_chain(device, event, (u32) ac->state);
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
}
@@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = {
{},
};
-static int acpi_ac_probe(struct platform_device *pdev)
+static int acpi_ac_add(struct acpi_device *device)
{
int result = 0;
struct acpi_ac *ac = NULL;
- struct acpi_device *adev;
- if (!pdev)
- return -EINVAL;
- adev = ACPI_COMPANION(&pdev->dev);
- if (!adev)
- return -ENODEV;
+ if (!device)
+ return -EINVAL;
ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
if (!ac)
return -ENOMEM;
- strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
- strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
- ac->pdev = pdev;
- platform_set_drvdata(pdev, ac);
+ ac->device = device;
+ strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_AC_CLASS);
+ device->driver_data = ac;
result = acpi_ac_get_state(ac);
if (result)
goto end;
- ac->charger.name = acpi_device_bid(adev);
+ ac->charger.name = acpi_device_bid(device);
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
- result = power_supply_register(&pdev->dev, &ac->charger);
+ result = power_supply_register(&ac->device->dev, &ac->charger);
if (result)
goto end;
- result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
- ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac);
- if (result) {
- power_supply_unregister(&ac->charger);
- goto end;
- }
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
- acpi_device_name(adev), acpi_device_bid(adev),
+ acpi_device_name(device), acpi_device_bid(device),
ac->state ? "on-line" : "off-line");
ac->battery_nb.notifier_call = acpi_ac_battery_notify;
@@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev)
if (!dev)
return -EINVAL;
- ac = platform_get_drvdata(to_platform_device(dev));
+ ac = acpi_driver_data(to_acpi_device(dev));
if (!ac)
return -EINVAL;
@@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev)
#else
#define acpi_ac_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
-static int acpi_ac_remove(struct platform_device *pdev)
+static int acpi_ac_remove(struct acpi_device *device)
{
- struct acpi_ac *ac;
+ struct acpi_ac *ac = NULL;
+
- if (!pdev)
+ if (!device || !acpi_driver_data(device))
return -EINVAL;
- acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
- ACPI_ALL_NOTIFY, acpi_ac_notify_handler);
+ ac = acpi_driver_data(device);
- ac = platform_get_drvdata(pdev);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
unregister_acpi_notifier(&ac->battery_nb);
@@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev)
return 0;
}
-static const struct acpi_device_id acpi_ac_match[] = {
- { "ACPI0003", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
-
-static struct platform_driver acpi_ac_driver = {
- .probe = acpi_ac_probe,
- .remove = acpi_ac_remove,
- .driver = {
- .name = "acpi-ac",
- .owner = THIS_MODULE,
- .pm = &acpi_ac_pm_ops,
- .acpi_match_table = ACPI_PTR(acpi_ac_match),
- },
-};
-
static int __init acpi_ac_init(void)
{
int result;
@@ -316,7 +315,7 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
- result = platform_driver_register(&acpi_ac_driver);
+ result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0)
return -ENODEV;
@@ -325,7 +324,7 @@ static int __init acpi_ac_init(void)
static void __exit acpi_ac_exit(void)
{
- platform_driver_unregister(&acpi_ac_driver);
+ acpi_bus_unregister_driver(&acpi_ac_driver);
}
module_init(acpi_ac_init);
module_exit(acpi_ac_exit);
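
For orientation, the net effect of the ac.c hunks above is that the driver's private data now hangs off the struct acpi_device instead of a platform device. A condensed sketch of the post-patch data flow (kernel-only code, assembled from the hunks above, error paths omitted):

static int acpi_ac_add(struct acpi_device *device)
{
	struct acpi_ac *ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);

	if (!ac)
		return -ENOMEM;
	ac->device = device;
	device->driver_data = ac;		/* replaces platform_set_drvdata() */
	return 0;
}

static void acpi_ac_notify(struct acpi_device *device, u32 event)
{
	struct acpi_ac *ac = acpi_driver_data(device);	/* back-pointer set in add() */

	if (!ac)
		return;
	/* netlink event, notifier chain and uevent as in the hunk above */
}

With the .ops.add/.remove/.notify callbacks wired into struct acpi_driver and acpi_bus_register_driver() called from acpi_ac_init(), the separate acpi_install_notify_handler() registration is no longer needed.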
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index dbfe49e5fd63..1d4950388fa1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform");
static const struct acpi_device_id acpi_platform_device_ids[] = {
{ "PNP0D40" },
- { "ACPI0003" },
{ "VPC2004" },
{ "BCM4752" },
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b06f5f55ada9..52c81c49cc7d 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -405,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device,
goto err;
pr->dev = dev;
- dev->offline = pr->flags.need_hotplug_init;
/* Trigger the processor driver's .probe() if present. */
if (device_attach(dev) >= 0)
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 49bbc71fad54..a08a448068dd 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
* address. Although ACPICA adheres to the ACPI specification which
* requires the use of the corresponding 64-bit address if it is non-zero,
* some machines have been found to have a corrupted non-zero 64-bit
- * address. Default is FALSE, do not favor the 32-bit addresses.
+ * address. Default is TRUE, favor the 32-bit addresses.
*/
-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
/*
* Optionally truncate I/O addresses to 16 bits. Provides compatibility
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index a4702eee91a8..9fb85f38de90 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
u32 table_count;
struct acpi_table_header *table;
acpi_physical_address address;
+ acpi_physical_address rsdt_address;
u32 length;
u8 *table_entry;
acpi_status status;
@@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* as per the ACPI specification.
*/
address = (acpi_physical_address) rsdp->xsdt_physical_address;
+ rsdt_address =
+ (acpi_physical_address) rsdp->rsdt_physical_address;
table_entry_size = ACPI_XSDT_ENTRY_SIZE;
} else {
/* Root table is an RSDT (32-bit physical addresses) */
address = (acpi_physical_address) rsdp->rsdt_physical_address;
+ rsdt_address = address;
table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
@@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
/* Fall back to the RSDT */
- address =
- (acpi_physical_address) rsdp->rsdt_physical_address;
+ address = rsdt_address;
table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 9a2c63b20050..6e7b2a12860d 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -36,6 +36,12 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/uaccess.h>
+#endif
+
#include <linux/acpi.h>
#include <linux/power_supply.h>
@@ -64,6 +70,19 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_battery_dir(void);
+extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
+
+enum acpi_battery_files {
+ info_tag = 0,
+ state_tag,
+ alarm_tag,
+ ACPI_BATTERY_NUMFILES,
+};
+
+#endif
+
static const struct acpi_device_id battery_device_ids[] = {
{"PNP0C0A", 0},
{"", 0},
@@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
+#ifdef CONFIG_ACPI_PROCFS_POWER
+inline char *acpi_battery_units(struct acpi_battery *battery)
+{
+ return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
+ "mA" : "mW";
+}
+#endif
+
/* --------------------------------------------------------------------------
Battery Management
-------------------------------------------------------------------------- */
@@ -717,6 +744,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
}
/* --------------------------------------------------------------------------
+ FS Interface (/proc)
+ -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static struct proc_dir_entry *acpi_battery_dir;
+
+static int acpi_battery_print_info(struct seq_file *seq, int result)
+{
+ struct acpi_battery *battery = seq->private;
+
+ if (result)
+ goto end;
+
+ seq_printf(seq, "present: %s\n",
+ acpi_battery_present(battery) ? "yes" : "no");
+ if (!acpi_battery_present(battery))
+ goto end;
+ if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "design capacity: unknown\n");
+ else
+ seq_printf(seq, "design capacity: %d %sh\n",
+ battery->design_capacity,
+ acpi_battery_units(battery));
+
+ if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "last full capacity: unknown\n");
+ else
+ seq_printf(seq, "last full capacity: %d %sh\n",
+ battery->full_charge_capacity,
+ acpi_battery_units(battery));
+
+ seq_printf(seq, "battery technology: %srechargeable\n",
+ (!battery->technology)?"non-":"");
+
+ if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "design voltage: unknown\n");
+ else
+ seq_printf(seq, "design voltage: %d mV\n",
+ battery->design_voltage);
+ seq_printf(seq, "design capacity warning: %d %sh\n",
+ battery->design_capacity_warning,
+ acpi_battery_units(battery));
+ seq_printf(seq, "design capacity low: %d %sh\n",
+ battery->design_capacity_low,
+ acpi_battery_units(battery));
+ seq_printf(seq, "cycle count: %i\n", battery->cycle_count);
+ seq_printf(seq, "capacity granularity 1: %d %sh\n",
+ battery->capacity_granularity_1,
+ acpi_battery_units(battery));
+ seq_printf(seq, "capacity granularity 2: %d %sh\n",
+ battery->capacity_granularity_2,
+ acpi_battery_units(battery));
+ seq_printf(seq, "model number: %s\n", battery->model_number);
+ seq_printf(seq, "serial number: %s\n", battery->serial_number);
+ seq_printf(seq, "battery type: %s\n", battery->type);
+ seq_printf(seq, "OEM info: %s\n", battery->oem_info);
+ end:
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery info\n");
+ return result;
+}
+
+static int acpi_battery_print_state(struct seq_file *seq, int result)
+{
+ struct acpi_battery *battery = seq->private;
+
+ if (result)
+ goto end;
+
+ seq_printf(seq, "present: %s\n",
+ acpi_battery_present(battery) ? "yes" : "no");
+ if (!acpi_battery_present(battery))
+ goto end;
+
+ seq_printf(seq, "capacity state: %s\n",
+ (battery->state & 0x04) ? "critical" : "ok");
+ if ((battery->state & 0x01) && (battery->state & 0x02))
+ seq_printf(seq,
+ "charging state: charging/discharging\n");
+ else if (battery->state & 0x01)
+ seq_printf(seq, "charging state: discharging\n");
+ else if (battery->state & 0x02)
+ seq_printf(seq, "charging state: charging\n");
+ else
+ seq_printf(seq, "charging state: charged\n");
+
+ if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "present rate: unknown\n");
+ else
+ seq_printf(seq, "present rate: %d %s\n",
+ battery->rate_now, acpi_battery_units(battery));
+
+ if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "remaining capacity: unknown\n");
+ else
+ seq_printf(seq, "remaining capacity: %d %sh\n",
+ battery->capacity_now, acpi_battery_units(battery));
+ if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
+ seq_printf(seq, "present voltage: unknown\n");
+ else
+ seq_printf(seq, "present voltage: %d mV\n",
+ battery->voltage_now);
+ end:
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery state\n");
+
+ return result;
+}
+
+static int acpi_battery_print_alarm(struct seq_file *seq, int result)
+{
+ struct acpi_battery *battery = seq->private;
+
+ if (result)
+ goto end;
+
+ if (!acpi_battery_present(battery)) {
+ seq_printf(seq, "present: no\n");
+ goto end;
+ }
+ seq_printf(seq, "alarm: ");
+ if (!battery->alarm)
+ seq_printf(seq, "unsupported\n");
+ else
+ seq_printf(seq, "%u %sh\n", battery->alarm,
+ acpi_battery_units(battery));
+ end:
+ if (result)
+ seq_printf(seq, "ERROR: Unable to read battery alarm\n");
+ return result;
+}
+
+static ssize_t acpi_battery_write_alarm(struct file *file,
+ const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ int result = 0;
+ char alarm_string[12] = { '\0' };
+ struct seq_file *m = file->private_data;
+ struct acpi_battery *battery = m->private;
+
+ if (!battery || (count > sizeof(alarm_string) - 1))
+ return -EINVAL;
+ if (!acpi_battery_present(battery)) {
+ result = -ENODEV;
+ goto end;
+ }
+ if (copy_from_user(alarm_string, buffer, count)) {
+ result = -EFAULT;
+ goto end;
+ }
+ alarm_string[count] = '\0';
+ battery->alarm = simple_strtol(alarm_string, NULL, 0);
+ result = acpi_battery_set_alarm(battery);
+ end:
+ if (!result)
+ return count;
+ return result;
+}
+
+typedef int(*print_func)(struct seq_file *seq, int result);
+
+static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
+ acpi_battery_print_info,
+ acpi_battery_print_state,
+ acpi_battery_print_alarm,
+};
+
+static int acpi_battery_read(int fid, struct seq_file *seq)
+{
+ struct acpi_battery *battery = seq->private;
+ int result = acpi_battery_update(battery);
+ return acpi_print_funcs[fid](seq, result);
+}
+
+#define DECLARE_FILE_FUNCTIONS(_name) \
+static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
+{ \
+ return acpi_battery_read(_name##_tag, seq); \
+} \
+static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
+}
+
+DECLARE_FILE_FUNCTIONS(info);
+DECLARE_FILE_FUNCTIONS(state);
+DECLARE_FILE_FUNCTIONS(alarm);
+
+#undef DECLARE_FILE_FUNCTIONS
+
+#define FILE_DESCRIPTION_RO(_name) \
+ { \
+ .name = __stringify(_name), \
+ .mode = S_IRUGO, \
+ .ops = { \
+ .open = acpi_battery_##_name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+#define FILE_DESCRIPTION_RW(_name) \
+ { \
+ .name = __stringify(_name), \
+ .mode = S_IFREG | S_IRUGO | S_IWUSR, \
+ .ops = { \
+ .open = acpi_battery_##_name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .write = acpi_battery_write_##_name, \
+ .release = single_release, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
+static const struct battery_file {
+ struct file_operations ops;
+ umode_t mode;
+ const char *name;
+} acpi_battery_file[] = {
+ FILE_DESCRIPTION_RO(info),
+ FILE_DESCRIPTION_RO(state),
+ FILE_DESCRIPTION_RW(alarm),
+};
+
+#undef FILE_DESCRIPTION_RO
+#undef FILE_DESCRIPTION_RW
+
+static int acpi_battery_add_fs(struct acpi_device *device)
+{
+ struct proc_dir_entry *entry = NULL;
+ int i;
+
+ printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
+ " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+ if (!acpi_device_dir(device)) {
+ acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
+ acpi_battery_dir);
+ if (!acpi_device_dir(device))
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
+ entry = proc_create_data(acpi_battery_file[i].name,
+ acpi_battery_file[i].mode,
+ acpi_device_dir(device),
+ &acpi_battery_file[i].ops,
+ acpi_driver_data(device));
+ if (!entry)
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void acpi_battery_remove_fs(struct acpi_device *device)
+{
+ int i;
+ if (!acpi_device_dir(device))
+ return;
+ for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
+ remove_proc_entry(acpi_battery_file[i].name,
+ acpi_device_dir(device));
+
+ remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
+ acpi_device_dir(device) = NULL;
+}
+
+#endif
+
+/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device)
result = acpi_battery_update(battery);
if (result)
goto fail;
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ result = acpi_battery_add_fs(device);
+#endif
+ if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_battery_remove_fs(device);
+#endif
+ goto fail;
+ }
printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device)
return -EINVAL;
battery = acpi_driver_data(device);
unregister_pm_notifier(&battery->pm_nb);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_battery_remove_fs(device);
+#endif
sysfs_remove_battery(battery);
mutex_destroy(&battery->lock);
mutex_destroy(&battery->sysfs_lock);
@@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
if (dmi_check_system(bat_dmi_table))
battery_bix_broken_package = 1;
- acpi_bus_register_driver(&acpi_battery_driver);
+
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_battery_dir = acpi_lock_battery_dir();
+ if (!acpi_battery_dir)
+ return;
+#endif
+ if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
+ return;
+ }
+ return;
}
static int __init acpi_battery_init(void)
@@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
acpi_bus_unregister_driver(&acpi_battery_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+ acpi_unlock_battery_dir(acpi_battery_dir);
+#endif
}
module_init(acpi_battery_init);
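
The procfs plumbing above is macro-generated; expanding DECLARE_FILE_FUNCTIONS(info) by hand makes the seq_file wiring explicit (this is a mechanical expansion of the macro defined in the hunk, not new code):

static int acpi_battery_read_info(struct seq_file *seq, void *offset)
{
	return acpi_battery_read(info_tag, seq);
}

static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
}

FILE_DESCRIPTION_RO(info) then points the entry's .ops.open at acpi_battery_info_open_fs, so each open of the info file under /proc/acpi/battery/<bid>/ re-runs acpi_battery_update() and prints via acpi_battery_print_info().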
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index afec4526c48a..3d8413d02a97 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -314,6 +314,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
@@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
},
},
+ /*
+ * Without this, this Eee PC exports a non-working WMI interface; with
+ * this it exports the working "good old" eeepc_laptop interface, fixing
+ * both brightness control and rfkill.
+ */
+ {
+ .callback = dmi_enable_osi_linux,
+ .ident = "Asus EEE PC 1015PX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+ },
+ },
{}
};
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
new file mode 100644
index 000000000000..6c9ee68e46fb
--- /dev/null
+++ b/drivers/acpi/cm_sbs.c
@@ -0,0 +1,105 @@
+/*
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+#define PREFIX "ACPI: "
+
+ACPI_MODULE_NAME("cm_sbs");
+#define ACPI_AC_CLASS "ac_adapter"
+#define ACPI_BATTERY_CLASS "battery"
+#define _COMPONENT ACPI_SBS_COMPONENT
+static struct proc_dir_entry *acpi_ac_dir;
+static struct proc_dir_entry *acpi_battery_dir;
+
+static DEFINE_MUTEX(cm_sbs_mutex);
+
+static int lock_ac_dir_cnt;
+static int lock_battery_dir_cnt;
+
+struct proc_dir_entry *acpi_lock_ac_dir(void)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (!acpi_ac_dir)
+ acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
+ if (acpi_ac_dir) {
+ lock_ac_dir_cnt++;
+ } else {
+ printk(KERN_ERR PREFIX
+ "Cannot create %s\n", ACPI_AC_CLASS);
+ }
+ mutex_unlock(&cm_sbs_mutex);
+ return acpi_ac_dir;
+}
+EXPORT_SYMBOL(acpi_lock_ac_dir);
+
+void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (acpi_ac_dir_param)
+ lock_ac_dir_cnt--;
+ if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
+ remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
+ acpi_ac_dir = NULL;
+ }
+ mutex_unlock(&cm_sbs_mutex);
+}
+EXPORT_SYMBOL(acpi_unlock_ac_dir);
+
+struct proc_dir_entry *acpi_lock_battery_dir(void)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (!acpi_battery_dir) {
+ acpi_battery_dir =
+ proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
+ }
+ if (acpi_battery_dir) {
+ lock_battery_dir_cnt++;
+ } else {
+ printk(KERN_ERR PREFIX
+ "Cannot create %s\n", ACPI_BATTERY_CLASS);
+ }
+ mutex_unlock(&cm_sbs_mutex);
+ return acpi_battery_dir;
+}
+EXPORT_SYMBOL(acpi_lock_battery_dir);
+
+void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
+{
+ mutex_lock(&cm_sbs_mutex);
+ if (acpi_battery_dir_param)
+ lock_battery_dir_cnt--;
+ if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
+ && acpi_battery_dir) {
+ remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
+ acpi_battery_dir = NULL;
+ }
+ mutex_unlock(&cm_sbs_mutex);
+ return;
+}
+EXPORT_SYMBOL(acpi_unlock_battery_dir);
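
cm_sbs.c only reference-counts the shared /proc/acpi directories; callers are expected to bracket their use with the lock/unlock pair, as the battery.c hunks above do. A minimal sketch of the intended pairing (example_init/example_exit are hypothetical callers; kernel-only code):

static struct proc_dir_entry *example_dir;	/* hypothetical user of the AC directory */

static int __init example_init(void)
{
	example_dir = acpi_lock_ac_dir();	/* creates /proc/acpi/ac_adapter on first use */
	if (!example_dir)
		return -ENODEV;
	return 0;
}

static void __exit example_exit(void)
{
	acpi_unlock_ac_dir(example_dir);	/* directory removed once the lock count drops to zero */
}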
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8b6990e417ec..f8bc5a755dda 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
- .ident = "ThinkPad T430s",
+ .ident = "ThinkPad T430 and T430s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
},
},
{
@@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
},
{
- .callback = video_set_use_native_backlight,
+ .callback = video_set_use_native_backlight,
.ident = "ThinkPad X1 Carbon",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
.ident = "Dell Inspiron 7520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
},
},
{
@@ -513,6 +513,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire 5742G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
.ident = "Acer Aspire V5-431",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c2706047337f..0033fafc470b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -815,7 +815,7 @@ config PATA_AT32
config PATA_AT91
tristate "PATA support for AT91SAM9260"
- depends on ARM && ARCH_AT91
+ depends on ARM && SOC_AT91SAM9
help
This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 71e15b73513d..60707814a84b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev)
return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
}
+static bool ahci_broken_devslp(struct pci_dev *pdev)
+{
+ /* device with broken DEVSLP but still showing SDS capability */
+ static const struct pci_device_id ids[] = {
+ { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */
+ {}
+ };
+
+ return pci_match_id(ids, pdev);
+}
+
#ifdef CONFIG_ATA_ACPI
static void ahci_gtf_filter_workaround(struct ata_host *host)
{
@@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
+ /* must set flag prior to save config in order to take effect */
+ if (ahci_broken_devslp(pdev))
+ hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
+
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index b5eb886da226..af63c75c2001 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -236,6 +236,7 @@ enum {
port start (wait until
error-handling stage) */
AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */
+ AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 497c7abe1c7d..8befeb69eeb1 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -29,9 +29,25 @@
#include "ahci.h"
enum {
- PORT_PHY_CTL = 0x178, /* Port0 PHY Control */
- PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */
- HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+ /* Timer 1-ms Register */
+ IMX_TIMER1MS = 0x00e0,
+ /* Port0 PHY Control Register */
+ IMX_P0PHYCR = 0x0178,
+ IMX_P0PHYCR_TEST_PDDQ = 1 << 20,
+ IMX_P0PHYCR_CR_READ = 1 << 19,
+ IMX_P0PHYCR_CR_WRITE = 1 << 18,
+ IMX_P0PHYCR_CR_CAP_DATA = 1 << 17,
+ IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16,
+ /* Port0 PHY Status Register */
+ IMX_P0PHYSR = 0x017c,
+ IMX_P0PHYSR_CR_ACK = 1 << 18,
+ IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0,
+ /* Lane0 Output Status Register */
+ IMX_LANE0_OUT_STAT = 0x2003,
+ IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1,
+ /* Clock Reset Register */
+ IMX_CLOCK_RESET = 0x7f3f,
+ IMX_CLOCK_RESET_RESET = 1 << 0,
};
enum ahci_imx_type {
@@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support
static void ahci_imx_host_stop(struct ata_host *host);
+static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert)
+{
+ int timeout = 10;
+ u32 crval;
+ u32 srval;
+
+ /* Assert or deassert the bit */
+ crval = readl(mmio + IMX_P0PHYCR);
+ if (assert)
+ crval |= bit;
+ else
+ crval &= ~bit;
+ writel(crval, mmio + IMX_P0PHYCR);
+
+ /* Wait for the cr_ack signal */
+ do {
+ srval = readl(mmio + IMX_P0PHYSR);
+ if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK)
+ break;
+ usleep_range(100, 200);
+ } while (--timeout);
+
+ return timeout ? 0 : -ETIMEDOUT;
+}
+
+static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio)
+{
+ u32 crval = addr;
+ int ret;
+
+ /* Supply the address on cr_data_in */
+ writel(crval, mmio + IMX_P0PHYCR);
+
+ /* Assert the cr_cap_addr signal */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true);
+ if (ret)
+ return ret;
+
+ /* Deassert cr_cap_addr */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int imx_phy_reg_write(u16 val, void __iomem *mmio)
+{
+ u32 crval = val;
+ int ret;
+
+ /* Supply the data on cr_data_in */
+ writel(crval, mmio + IMX_P0PHYCR);
+
+ /* Assert the cr_cap_data signal */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true);
+ if (ret)
+ return ret;
+
+ /* Deassert cr_cap_data */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false);
+ if (ret)
+ return ret;
+
+ if (val & IMX_CLOCK_RESET_RESET) {
+ /*
+ * In case we're resetting the phy, it's unable to acknowledge,
+ * so we return immediately here.
+ */
+ crval |= IMX_P0PHYCR_CR_WRITE;
+ writel(crval, mmio + IMX_P0PHYCR);
+ goto out;
+ }
+
+ /* Assert the cr_write signal */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true);
+ if (ret)
+ return ret;
+
+ /* Deassert cr_write */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false);
+ if (ret)
+ return ret;
+
+out:
+ return 0;
+}
+
+static int imx_phy_reg_read(u16 *val, void __iomem *mmio)
+{
+ int ret;
+
+ /* Assert the cr_read signal */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true);
+ if (ret)
+ return ret;
+
+ /* Capture the data from cr_data_out[] */
+ *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT;
+
+ /* Deassert cr_read */
+ ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int imx_sata_phy_reset(struct ahci_host_priv *hpriv)
+{
+ void __iomem *mmio = hpriv->mmio;
+ int timeout = 10;
+ u16 val;
+ int ret;
+
+ /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */
+ ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio);
+ if (ret)
+ return ret;
+ ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio);
+ if (ret)
+ return ret;
+
+ /* Wait for PHY RX_PLL to be stable */
+ do {
+ usleep_range(100, 200);
+ ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio);
+ if (ret)
+ return ret;
+ ret = imx_phy_reg_read(&val, mmio);
+ if (ret)
+ return ret;
+ if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE)
+ break;
+ } while (--timeout);
+
+ return timeout ? 0 : -ETIMEDOUT;
+}
+
static int imx_sata_enable(struct ahci_host_priv *hpriv)
{
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+ struct device *dev = &imxpriv->ahci_pdev->dev;
int ret;
if (imxpriv->no_device)
@@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
IMX6Q_GPR13_SATA_MPLL_CLK_EN,
IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+
+ usleep_range(100, 200);
+
+ ret = imx_sata_phy_reset(hpriv);
+ if (ret) {
+ dev_err(dev, "failed to reset phy: %d\n", ret);
+ goto disable_regulator;
+ }
}
usleep_range(1000, 2000);
@@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap)
* without full reset once the pddq mode is enabled making it
* impossible to use as part of libata LPM.
*/
- reg_val = readl(mmio + PORT_PHY_CTL);
- writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+ reg_val = readl(mmio + IMX_P0PHYCR);
+ writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
imx_sata_disable(hpriv);
imxpriv->no_device = true;
}
@@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
if (!imxpriv)
return -ENOMEM;
+ imxpriv->ahci_pdev = pdev;
imxpriv->no_device = false;
imxpriv->first_time = true;
imxpriv->type = (enum ahci_imx_type)of_id->data;
@@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
/*
* Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
- * and IP vendor specific register HOST_TIMER1MS.
+ * and IP vendor specific register IMX_TIMER1MS.
 * Configure CAP_SSS (support staggered spin-up).
* Implement the port0.
* Get the ahb clock rate, and configure the TIMER1MS register.
@@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
}
reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
- writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
+ writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
if (ret)
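
The PHY is programmed through an indirect "control register" port: an address or data word is placed in P0PHYCR and latched by pulsing a capture/strobe bit, with P0PHYSR.CR_ACK as the handshake. Composing the helpers from this hunk, a single register read would look like the following (imx_phy_read is a hypothetical wrapper; imx_sata_phy_reset() above does exactly this for IMX_LANE0_OUT_STAT):

static int imx_phy_read(void __iomem *mmio, u16 addr, u16 *val)
{
	int ret;

	ret = imx_phy_reg_addressing(addr, mmio);	/* latch the register address */
	if (ret)
		return ret;
	return imx_phy_reg_read(val, mmio);		/* pulse cr_read, capture cr_data_out */
}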
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 6bd4f660b4e1..b9861453fc81 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev,
cap &= ~HOST_CAP_SNTF;
}
+ if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) {
+ dev_info(dev,
+ "controller can't do DEVSLP, turning off\n");
+ cap2 &= ~HOST_CAP2_SDS;
+ cap2 &= ~HOST_CAP2_SADM;
+ }
+
if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
cap |= HOST_CAP_FBS;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 943cc8b83e59..ea83828bfea9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq,
static void ata_port_detach(struct ata_port *ap)
{
unsigned long flags;
+ struct ata_link *link;
+ struct ata_device *dev;
if (!ap->ops->error_handler)
goto skip_eh;
@@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap)
cancel_delayed_work_sync(&ap->hotplug_task);
skip_eh:
+ /* clean up zpodd on port removal */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ALL) {
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
+ }
+ }
if (ap->pmp_link) {
int i;
for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 293e2e0a0a87..00b73448b22e 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -56,6 +56,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/debugfs.h>
+#include <linux/log2.h>
/*
* DDR target is the same on all platforms.
@@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus,
*/
if ((u64)base < wend && end > wbase)
return 0;
-
- /*
- * Check if target/attribute conflicts
- */
- if (target == wtarget && attr == wattr)
- return 0;
}
return 1;
@@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
mbus->soc->win_cfg_offset(win);
u32 ctrl, remap_addr;
+ if (!is_power_of_2(size)) {
+ WARN(true, "Invalid MBus window size: 0x%zx\n", size);
+ return -EINVAL;
+ }
+
+ if ((base & (phys_addr_t)(size - 1)) != 0) {
+ WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base,
+ size);
+ return -EINVAL;
+ }
+
ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
(attr << WIN_CTRL_ATTR_SHIFT) |
(target << WIN_CTRL_TGT_SHIFT) |
@@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
win, (unsigned long long)wbase,
(unsigned long long)(wbase + wsize), wtarget, wattr);
+ if (!is_power_of_2(wsize) ||
+ ((wbase & (u64)(wsize - 1)) != 0))
+ seq_puts(seq, " (Invalid base/size!!)");
+
if (win < mbus->soc->num_remappable_wins) {
seq_printf(seq, " (remap %016llx)\n",
(unsigned long long)wremap);
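
The new checks encode the hardware constraint that an MBus window must have a power-of-two size and a base naturally aligned to that size. A standalone illustration of the same test (plain user-space C; mbus_window_ok and the sample addresses are for illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Same rule as the checks added to mvebu_mbus_setup_window() above. */
static bool mbus_window_ok(unsigned long long base, unsigned long long size)
{
	bool pow2 = size && (size & (size - 1)) == 0;

	return pow2 && (base & (size - 1)) == 0;
}

int main(void)
{
	printf("%d\n", mbus_window_ok(0x80000000ULL, 0x10000000ULL));	/* 1: 256 MiB, aligned */
	printf("%d\n", mbus_window_ok(0x80000000ULL, 0x0c000000ULL));	/* 0: 192 MiB is not a power of two */
	printf("%d\n", mbus_window_ok(0x88000000ULL, 0x10000000ULL));	/* 0: base not aligned to 256 MiB */
	return 0;
}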
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 8121b4c70ede..b29703324e94 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -730,6 +730,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
agp_copy_info(agp_bridge, &kerninfo);
+ memset(&userinfo, 0, sizeof(userinfo));
userinfo.version.major = kerninfo.version.major;
userinfo.version.minor = kerninfo.version.minor;
userinfo.bridge_id = kerninfo.device->vendor |
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6b75713d953a..102c50d38902 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -995,8 +995,11 @@ retry:
ibytes = min_t(size_t, ibytes, have_bytes - reserved);
if (ibytes < min)
ibytes = 0;
- entropy_count = max_t(int, 0,
- entropy_count - (ibytes << (ENTROPY_SHIFT + 3)));
+ if (have_bytes >= ibytes + reserved)
+ entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+ else
+ entropy_count = reserved << (ENTROPY_SHIFT + 3);
+
if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index b3ea223585bd..61dcc8011ec7 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent)
/* Cache TPM ACPI handle and version string */
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
ppi_callback, NULL, NULL, &tpm_ppi_handle);
- if (tpm_ppi_handle == NULL)
- return -ENODEV;
-
- return sysfs_create_group(parent, &ppi_attr_grp);
+ return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0;
}
void tpm_remove_ppi(struct kobject *parent)
{
- sysfs_remove_group(parent, &ppi_attr_grp);
+ if (tpm_ppi_handle)
+ sysfs_remove_group(parent, &ppi_attr_grp);
}
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index c7607feb18dd..54a06526f64f 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */
static bool clk_requires_trigger(struct kona_clk *bcm_clk)
{
- struct peri_clk_data *peri = bcm_clk->peri;
+ struct peri_clk_data *peri = bcm_clk->u.peri;
struct bcm_clk_sel *sel;
struct bcm_clk_div *div;
@@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
u32 limit;
BUG_ON(bcm_clk->type != bcm_clk_peri);
- peri = bcm_clk->peri;
+ peri = bcm_clk->u.peri;
name = bcm_clk->name;
range = bcm_clk->ccu->range;
@@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
div = &peri->div;
if (divider_exists(div)) {
- if (div->offset > limit) {
+ if (div->u.s.offset > limit) {
pr_err("%s: bad divider offset for %s (%u > %u)\n",
- __func__, name, div->offset, limit);
+ __func__, name, div->u.s.offset, limit);
return false;
}
}
div = &peri->pre_div;
if (divider_exists(div)) {
- if (div->offset > limit) {
+ if (div->u.s.offset > limit) {
pr_err("%s: bad pre-divider offset for %s "
"(%u > %u)\n",
- __func__, name, div->offset, limit);
+ __func__, name, div->u.s.offset, limit);
return false;
}
}
@@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
{
if (divider_is_fixed(div)) {
/* Any fixed divider value but 0 is OK */
- if (div->fixed == 0) {
+ if (div->u.fixed == 0) {
pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
field_name, clock_name);
return false;
}
return true;
}
- if (!bitfield_valid(div->shift, div->width, field_name, clock_name))
+ if (!bitfield_valid(div->u.s.shift, div->u.s.width,
+ field_name, clock_name))
return false;
if (divider_has_fraction(div))
- if (div->frac_width > div->width) {
+ if (div->u.s.frac_width > div->u.s.width) {
pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
__func__, field_name, clock_name,
- div->frac_width, div->width);
+ div->u.s.frac_width, div->u.s.width);
return false;
}
@@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name,
*/
static bool kona_dividers_valid(struct kona_clk *bcm_clk)
{
- struct peri_clk_data *peri = bcm_clk->peri;
+ struct peri_clk_data *peri = bcm_clk->u.peri;
struct bcm_clk_div *div;
struct bcm_clk_div *pre_div;
u32 limit;
@@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk)
limit = BITS_PER_BYTE * sizeof(u32);
- return div->frac_width + pre_div->frac_width <= limit;
+ return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
}
@@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk)
if (!peri_clk_data_offsets_valid(bcm_clk))
return false;
- peri = bcm_clk->peri;
+ peri = bcm_clk->u.peri;
name = bcm_clk->name;
gate = &peri->gate;
if (gate_exists(gate) && !gate_valid(gate, "gate", name))
@@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk)
{
switch (bcm_clk->type) {
case bcm_clk_peri:
- peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data);
+ peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
break;
default:
break;
}
- bcm_clk->data = NULL;
+ bcm_clk->u.data = NULL;
bcm_clk->type = bcm_clk_none;
}
@@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name,
break;
}
bcm_clk->type = type;
- bcm_clk->data = data;
+ bcm_clk->u.data = data;
/* Make sure everything makes sense before we set it up */
if (!kona_clk_valid(bcm_clk)) {
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index e3d339e08309..db11a87449f2 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor)
/* Convert a divider into the scaled divisor value it represents. */
static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
{
- return (u64)reg_div + ((u64)1 << div->frac_width);
+ return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
}
/*
@@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
BUG_ON(billionths >= BILLION);
combined = (u64)div_value * BILLION + billionths;
- combined <<= div->frac_width;
+ combined <<= div->u.s.frac_width;
return do_div_round_closest(combined, BILLION);
}
@@ -87,7 +87,7 @@ static inline u64
scaled_div_min(struct bcm_clk_div *div)
{
if (divider_is_fixed(div))
- return (u64)div->fixed;
+ return (u64)div->u.fixed;
return scaled_div_value(div, 0);
}
@@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div)
u32 reg_div;
if (divider_is_fixed(div))
- return (u64)div->fixed;
+ return (u64)div->u.fixed;
- reg_div = ((u32)1 << div->width) - 1;
+ reg_div = ((u32)1 << div->u.s.width) - 1;
return scaled_div_value(div, reg_div);
}
@@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div)
BUG_ON(scaled_div < scaled_div_min(div));
BUG_ON(scaled_div > scaled_div_max(div));
- return (u32)(scaled_div - ((u64)1 << div->frac_width));
+ return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
}
/* Return a rate scaled for use when dividing by a scaled divisor. */
@@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate)
if (divider_is_fixed(div))
return (u64)rate;
- return (u64)rate << div->frac_width;
+ return (u64)rate << div->u.s.frac_width;
}
/* CCU access */
@@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
u32 reg_div;
if (divider_is_fixed(div))
- return (u64)div->fixed;
+ return (u64)div->u.fixed;
flags = ccu_lock(ccu);
- reg_val = __ccu_read(ccu, div->offset);
+ reg_val = __ccu_read(ccu, div->u.s.offset);
ccu_unlock(ccu, flags);
/* Extract the full divider field from the register value */
- reg_div = bitfield_extract(reg_val, div->shift, div->width);
+ reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
/* Return the scaled divisor value it represents */
return scaled_div_value(div, reg_div);
@@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
* state was defined in the device tree, we just find out
* what its current value is rather than updating it.
*/
- if (div->scaled_div == BAD_SCALED_DIV_VALUE) {
- reg_val = __ccu_read(ccu, div->offset);
- reg_div = bitfield_extract(reg_val, div->shift, div->width);
- div->scaled_div = scaled_div_value(div, reg_div);
+ if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ reg_div = bitfield_extract(reg_val, div->u.s.shift,
+ div->u.s.width);
+ div->u.s.scaled_div = scaled_div_value(div, reg_div);
return 0;
}
/* Convert the scaled divisor to the value we need to record */
- reg_div = divider(div, div->scaled_div);
+ reg_div = divider(div, div->u.s.scaled_div);
/* Clock needs to be enabled before changing the rate */
enabled = __is_clk_gate_enabled(ccu, gate);
@@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
}
/* Replace the divider value and record the result */
- reg_val = __ccu_read(ccu, div->offset);
- reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div);
- __ccu_write(ccu, div->offset, reg_val);
+ reg_val = __ccu_read(ccu, div->u.s.offset);
+ reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
+ reg_div);
+ __ccu_write(ccu, div->u.s.offset, reg_val);
/* If the trigger fails we still want to disable the gate */
if (!__clk_trigger(ccu, trig))
@@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
BUG_ON(divider_is_fixed(div));
- previous = div->scaled_div;
+ previous = div->u.s.scaled_div;
if (previous == scaled_div)
return 0; /* No change */
- div->scaled_div = scaled_div;
+ div->u.s.scaled_div = scaled_div;
flags = ccu_lock(ccu);
__ccu_write_enable(ccu);
@@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
ccu_unlock(ccu, flags);
if (ret)
- div->scaled_div = previous; /* Revert the change */
+ div->u.s.scaled_div = previous; /* Revert the change */
return ret;
@@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
static int kona_peri_clk_enable(struct clk_hw *hw)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true);
}
@@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw)
static void kona_peri_clk_disable(struct clk_hw *hw)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
(void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false);
}
@@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw)
static int kona_peri_clk_is_enabled(struct clk_hw *hw)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct bcm_clk_gate *gate = &bcm_clk->peri->gate;
+ struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
}
@@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct peri_clk_data *data = bcm_clk->peri;
+ struct peri_clk_data *data = bcm_clk->u.peri;
return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
parent_rate);
@@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct bcm_clk_div *div = &bcm_clk->peri->div;
+ struct bcm_clk_div *div = &bcm_clk->u.peri->div;
if (!divider_exists(div))
return __clk_get_rate(hw->clk);
/* Quietly avoid a zero rate */
- return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div,
+ return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
rate ? rate : 1, *parent_rate, NULL);
}
static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct peri_clk_data *data = bcm_clk->peri;
+ struct peri_clk_data *data = bcm_clk->u.peri;
struct bcm_clk_sel *sel = &data->sel;
struct bcm_clk_trig *trig;
int ret;
@@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct peri_clk_data *data = bcm_clk->peri;
+ struct peri_clk_data *data = bcm_clk->u.peri;
u8 index;
index = selector_read_index(bcm_clk->ccu, &data->sel);
@@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct kona_clk *bcm_clk = to_kona_clk(hw);
- struct peri_clk_data *data = bcm_clk->peri;
+ struct peri_clk_data *data = bcm_clk->u.peri;
struct bcm_clk_div *div = &data->div;
u64 scaled_div = 0;
int ret;
@@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = {
static bool __peri_clk_init(struct kona_clk *bcm_clk)
{
struct ccu_data *ccu = bcm_clk->ccu;
- struct peri_clk_data *peri = bcm_clk->peri;
+ struct peri_clk_data *peri = bcm_clk->u.peri;
const char *name = bcm_clk->name;
struct bcm_clk_trig *trig;
diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h
index 5e139adc3dc5..dee690951bb6 100644
--- a/drivers/clk/bcm/clk-kona.h
+++ b/drivers/clk/bcm/clk-kona.h
@@ -57,7 +57,7 @@
#define divider_exists(div) FLAG_TEST(div, DIV, EXISTS)
#define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED)
#define divider_has_fraction(div) (!divider_is_fixed(div) && \
- (div)->frac_width > 0)
+ (div)->u.s.frac_width > 0)
#define selector_exists(sel) ((sel)->width != 0)
#define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS)
@@ -244,9 +244,9 @@ struct bcm_clk_div {
u32 frac_width; /* field fraction width */
u64 scaled_div; /* scaled divider value */
- };
+ } s;
u32 fixed; /* non-zero fixed divider value */
- };
+ } u;
u32 flags; /* BCM_CLK_DIV_FLAGS_* below */
};
@@ -263,28 +263,28 @@ struct bcm_clk_div {
/* A fixed (non-zero) divider */
#define FIXED_DIVIDER(_value) \
{ \
- .fixed = (_value), \
+ .u.fixed = (_value), \
.flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \
}
/* A divider with an integral divisor */
#define DIVIDER(_offset, _shift, _width) \
{ \
- .offset = (_offset), \
- .shift = (_shift), \
- .width = (_width), \
- .scaled_div = BAD_SCALED_DIV_VALUE, \
+ .u.s.offset = (_offset), \
+ .u.s.shift = (_shift), \
+ .u.s.width = (_width), \
+ .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
.flags = FLAG(DIV, EXISTS), \
}
/* A divider whose divisor has an integer and fractional part */
#define FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \
{ \
- .offset = (_offset), \
- .shift = (_shift), \
- .width = (_width), \
- .frac_width = (_frac_width), \
- .scaled_div = BAD_SCALED_DIV_VALUE, \
+ .u.s.offset = (_offset), \
+ .u.s.shift = (_shift), \
+ .u.s.width = (_width), \
+ .u.s.frac_width = (_frac_width), \
+ .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \
.flags = FLAG(DIV, EXISTS), \
}
@@ -380,7 +380,7 @@ struct kona_clk {
union {
void *data;
struct peri_clk_data *peri;
- };
+ } u;
};
#define to_kona_clk(_hw) \
container_of(_hw, struct kona_clk, hw)
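
The anonymous union/struct inside struct bcm_clk_div becomes a named member so the DIVIDER()/FRAC_DIVIDER() initializers can spell out the full .u.s.* path. A tiny standalone example of that initializer shape (illustrative stand-in types, not the kernel structs):

#include <stdio.h>

struct div {
	union {
		struct {
			unsigned offset, shift, width;
		} s;
		unsigned fixed;
	} u;
	unsigned flags;
};

#define DIVIDER(_offset, _shift, _width) \
	{ .u.s.offset = (_offset), .u.s.shift = (_shift), .u.s.width = (_width), .flags = 1 }

int main(void)
{
	struct div d = DIVIDER(0x14, 8, 4);

	printf("offset=%u shift=%u width=%u\n", d.u.s.offset, d.u.s.shift, d.u.s.width);
	return 0;
}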
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index ec22112e569f..4637697c139f 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
return true;
}
+static int _round_up_table(const struct clk_div_table *table, int div)
+{
+ const struct clk_div_table *clkt;
+ int up = _get_table_maxdiv(table);
+
+ for (clkt = table; clkt->div; clkt++) {
+ if (clkt->div == div)
+ return clkt->div;
+ else if (clkt->div < div)
+ continue;
+
+ if ((clkt->div - div) < (up - div))
+ up = clkt->div;
+ }
+
+ return up;
+}
+
+static int _div_round_up(struct clk_divider *divider,
+ unsigned long parent_rate, unsigned long rate)
+{
+ int div = DIV_ROUND_UP(parent_rate, rate);
+
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ div = __roundup_pow_of_two(div);
+ if (divider->table)
+ div = _round_up_table(divider->table, div);
+
+ return div;
+}
+
static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate)
{
@@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
- bestdiv = DIV_ROUND_UP(parent_rate, rate);
+ bestdiv = _div_round_up(divider, parent_rate, rate);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
return bestdiv;
@@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
u32 val;
div = DIV_ROUND_UP(parent_rate, rate);
+
+ if (!_is_valid_div(divider, div))
+ return -EINVAL;
+
value = _get_val(divider, div);
if (value > div_mask(divider))
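
_div_round_up() rounds the raw DIV_ROUND_UP() result up to the next divider the hardware can actually program: the next power of two for CLK_DIVIDER_POWER_OF_TWO, or the next table entry otherwise. A standalone rendering of the table path (plain C; the helper name and sample table values are for illustration):

#include <stdio.h>

struct div_table { unsigned div; };	/* simplified stand-in; a .div of 0 terminates, as in the kernel table */

static unsigned round_up_table(const struct div_table *table, unsigned div)
{
	const struct div_table *clkt;
	unsigned up = 0;

	/* start from the largest divider in the table (what _get_table_maxdiv() returns) */
	for (clkt = table; clkt->div; clkt++)
		if (clkt->div > up)
			up = clkt->div;

	/* then pick the smallest table divider >= div, as _round_up_table() does */
	for (clkt = table; clkt->div; clkt++) {
		if (clkt->div == div)
			return clkt->div;
		if (clkt->div < div)
			continue;
		if (clkt->div - div < up - div)
			up = clkt->div;
	}
	return up;
}

int main(void)
{
	static const struct div_table t[] = { {1}, {2}, {4}, {8}, {0} };	/* sample values */

	printf("%u %u %u\n", round_up_table(t, 3), round_up_table(t, 5), round_up_table(t, 8));	/* 4 8 8 */
	return 0;
}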
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dff0373f53c1..7cf2c093cc54 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(__clk_register);
-static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes. It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
int i, ret;
+ struct clk *clk;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk) {
+ pr_err("%s: could not allocate clk\n", __func__);
+ ret = -ENOMEM;
+ goto fail_out;
+ }
clk->name = kstrdup(hw->init->name, GFP_KERNEL);
if (!clk->name) {
@@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
ret = __clk_init(dev, clk);
if (!ret)
- return 0;
+ return clk;
fail_parent_names_copy:
while (--i >= 0)
@@ -2035,36 +2054,6 @@ fail_parent_names_copy:
fail_parent_names:
kfree(clk->name);
fail_name:
- return ret;
-}
-
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
-struct clk *clk_register(struct device *dev, struct clk_hw *hw)
-{
- int ret;
- struct clk *clk;
-
- clk = kzalloc(sizeof(*clk), GFP_KERNEL);
- if (!clk) {
- pr_err("%s: could not allocate clk\n", __func__);
- ret = -ENOMEM;
- goto fail_out;
- }
-
- ret = _clk_register(dev, hw, clk);
- if (!ret)
- return clk;
-
kfree(clk);
fail_out:
return ERR_PTR(ret);
@@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk)
if (!hlist_empty(&clk->children)) {
struct clk *child;
+ struct hlist_node *t;
/* Reparent all children to the orphan list. */
- hlist_for_each_entry(child, &clk->children, child_node)
+ hlist_for_each_entry_safe(child, t, &clk->children, child_node)
clk_set_parent(child, NULL);
}
@@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister);
static void devm_clk_release(struct device *dev, void *res)
{
- clk_unregister(res);
+ clk_unregister(*(struct clk **)res);
}
/**
@@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res)
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
struct clk *clk;
- int ret;
+ struct clk **clkp;
- clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
- if (!clk)
+ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ if (!clkp)
return ERR_PTR(-ENOMEM);
- ret = _clk_register(dev, hw, clk);
- if (!ret) {
- devres_add(dev, clk);
+ clk = clk_register(dev, hw);
+ if (!IS_ERR(clk)) {
+ *clkp = clk;
+ devres_add(dev, clkp);
} else {
- devres_free(clk);
- clk = ERR_PTR(ret);
+ devres_free(clkp);
}
return clk;
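
devm_clk_register() now wraps clk_register() and stores a struct clk * inside the devres allocation, which is why devm_clk_release() dereferences it before unregistering. A minimal sketch of a driver using the managed variant (example_probe is hypothetical; devm_clk_register() is as declared above, hw setup elided):

static int example_probe(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;

	clk = devm_clk_register(dev, hw);	/* unregistered automatically when the driver detaches */
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return 0;
}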
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 2e5810c88d11..1f6324e29a80 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name,
static void __init cpg_mstp_clocks_init(struct device_node *np)
{
struct mstp_clock_group *group;
+ const char *idxname;
struct clk **clks;
unsigned int i;
@@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
for (i = 0; i < MSTP_MAX_CLOCKS; ++i)
clks[i] = ERR_PTR(-ENOENT);
+ if (of_find_property(np, "clock-indices", &i))
+ idxname = "clock-indices";
+ else
+ idxname = "renesas,clock-indices";
+
for (i = 0; i < MSTP_MAX_CLOCKS; ++i) {
const char *parent_name;
const char *name;
@@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
continue;
parent_name = of_clk_get_parent_name(np, i);
- ret = of_property_read_u32_index(np, "renesas,clock-indices", i,
- &clkidx);
+ ret = of_property_read_u32_index(np, idxname, i, &clkidx);
if (parent_name == NULL || ret < 0)
break;
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 88dafb5e9627..de6da957a09d 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -20,6 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include "clk.h"
@@ -43,6 +44,8 @@
#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw)
+void __iomem *clk_mgr_base_addr;
+
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
unsigned long parent_rate)
{
@@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
const char *clk_name = node->name;
const char *parent_name[SOCFPGA_MAX_PARENTS];
struct clk_init_data init;
+ struct device_node *clkmgr_np;
int rc;
int i = 0;
@@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
if (WARN_ON(!pll_clk))
return NULL;
+ clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+ clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
+ BUG_ON(!clk_mgr_base_addr);
pll_clk->hw.reg = clk_mgr_base_addr + reg;
of_property_read_string(node, "clock-output-names", &clk_name);
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
index 35a960a993f9..43db947e5f0e 100644
--- a/drivers/clk/socfpga/clk.c
+++ b/drivers/clk/socfpga/clk.c
@@ -17,28 +17,11 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include "clk.h"
-void __iomem *clk_mgr_base_addr;
-
-static const struct of_device_id socfpga_child_clocks[] __initconst = {
- { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, },
- { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, },
- { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, },
- {},
-};
-
-static void __init socfpga_clkmgr_init(struct device_node *node)
-{
- clk_mgr_base_addr = of_iomap(node, 0);
- of_clk_init(socfpga_child_clocks);
-}
-CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init);
+CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init);
+CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init);
+CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init);
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 0d20241e0770..e1769addf435 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1718,7 +1718,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
"pll_re_vco");
} else {
val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL);
- pll_writel(val, pll_params->aux_reg, pll);
+ pll_writel(val_aux, pll_params->aux_reg, pll);
}
clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 099967302bf2..eab8ccfe6beb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,6 +37,7 @@
#define BYT_RATIOS 0x66a
#define BYT_VIDS 0x66b
#define BYT_TURBO_RATIOS 0x66c
+#define BYT_TURBO_VIDS 0x66d
#define FRAC_BITS 6
@@ -70,8 +71,9 @@ struct pstate_data {
};
struct vid_data {
- int32_t min;
- int32_t max;
+ int min;
+ int max;
+ int turbo;
int32_t ratio;
};
@@ -359,14 +361,14 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return (value >> 8) & 0xFF;
+ return (value >> 8) & 0x3F;
}
static int byt_get_max_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return (value >> 16) & 0xFF;
+ return (value >> 16) & 0x3F;
}
static int byt_get_turbo_pstate(void)
@@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
vid = fp_toint(vid_fp);
+ if (pstate > cpudata->pstate.max_pstate)
+ vid = cpudata->vid.turbo;
+
val |= vid;
wrmsrl(MSR_IA32_PERF_CTL, val);
@@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata)
{
u64 value;
+
rdmsrl(BYT_VIDS, value);
- cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
- cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
+ cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
+ cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
cpudata->vid.ratio = div_fp(
cpudata->vid.max - cpudata->vid.min,
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
+
+ rdmsrl(BYT_TURBO_VIDS, value);
+ cpudata->vid.turbo = value & 0x7f;
}
@@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_vid)
pstate_funcs.get_vid(cpu);
-
- /*
- * goto max pstate so we don't slow up boot if we are built-in if we are
- * a module we will take care of it during normal operation
- */
- intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
@@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
intel_pstate_get_cpu_pstates(cpu);
- if (!cpu->pstate.current_pstate) {
- all_cpu_data[cpunum] = NULL;
- kfree(cpu);
- return -ENODATA;
- }
cpu->cpu = cpunum;
@@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu->timer.expires = jiffies + HZ/100;
intel_pstate_busy_pid_reset(cpu);
intel_pstate_sample(cpu);
- intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
add_timer_on(&cpu->timer, cpunum);
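The intel_pstate hunks above add a dedicated turbo VID MSR (0x66d) and narrow the ratio masks to 6 bits. Below is a standalone userspace model of the patched VID selection, with made-up example values; the real min/max/turbo VIDs come from MSRs 0x66a/0x66b/0x66d and the driver uses 6-bit fixed point rather than doubles.

    /* Illustrative sketch only -- not kernel code. */
    #include <stdio.h>

    struct vid_data { int min, max, turbo; double ratio; };

    static int pick_vid(const struct vid_data *vid, int pstate,
                        int min_pstate, int max_pstate)
    {
        double vid_fp = vid->min + vid->ratio * (pstate - min_pstate);

        if (vid_fp < vid->min)          /* clamp, as byt_set_pstate() does */
            vid_fp = vid->min;
        if (vid_fp > vid->max)
            vid_fp = vid->max;

        /* turbo P-states use the dedicated turbo VID */
        if (pstate > max_pstate)
            return vid->turbo;
        return (int)vid_fp;
    }

    int main(void)
    {
        struct vid_data vid = { .min = 20, .max = 50, .turbo = 60 };
        int min_pstate = 8, max_pstate = 20, turbo_pstate = 24;

        vid.ratio = (double)(vid.max - vid.min) / (max_pstate - min_pstate);

        for (int p = min_pstate; p <= turbo_pstate; p += 4)
            printf("pstate %2d -> vid %d\n",
                   p, pick_vid(&vid, p, min_pstate, max_pstate));
        return 0;
    }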
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index f0bc31f5db27..d4add8621944 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
set_cpus_allowed_ptr(current, &cpus_allowed);
/* setting the cpu frequency */
- clk_set_rate(policy->clk, freq);
+ clk_set_rate(policy->clk, freq * 1000);
return 0;
}
@@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
- ret = clk_set_rate(cpuclk, rate);
+ ret = clk_set_rate(cpuclk, rate * 1000);
if (ret) {
clk_put(cpuclk);
return ret;
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9f25f5296029..0eabd81e1a90 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -16,9 +16,13 @@
char *tmp; \
\
tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
- sprintf(tmp, format, param); \
- strcat(str, tmp); \
- kfree(tmp); \
+ if (likely(tmp)) { \
+ sprintf(tmp, format, param); \
+ strcat(str, tmp); \
+ kfree(tmp); \
+ } else { \
+ strcat(str, "kmalloc failure in SPRINTFCAT"); \
+ } \
}
static void report_jump_idx(u32 status, char *outstr)
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 3ee852c9925b..071c2c969eec 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -756,6 +756,7 @@ static const struct {
*/
{ ACPI_SIG_IBFT },
{ "iBFT" },
+ { "BIFT" }, /* Broadcom iSCSI Offload */
};
static void __init acpi_find_ibft_region(void)
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index e73c6755a5eb..70304220a479 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -305,6 +305,8 @@ static struct ichx_desc ich6_desc = {
.ngpio = 50,
.have_blink = true,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
};
/* Intel 3100 */
@@ -324,6 +326,8 @@ static struct ichx_desc i3100_desc = {
.uses_gpe0 = true,
.ngpio = 50,
+ .regs = ichx_regs,
+ .reglen = ichx_reglen,
};
/* ICH7 and ICH8-based */
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 99a68310e7c0..3d53fd6880d1 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -894,9 +894,11 @@ static int mcp23s08_probe(struct spi_device *spi)
dev_err(&spi->dev, "invalid spi-present-mask\n");
return -ENODEV;
}
-
- for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++)
+ for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
+ if ((spi_present_mask & (1 << addr)))
+ chips++;
pullups[addr] = 0;
+ }
} else {
type = spi_get_device_id(spi)->driver_data;
pdata = dev_get_platdata(&spi->dev);
@@ -919,12 +921,12 @@ static int mcp23s08_probe(struct spi_device *spi)
pullups[addr] = pdata->chip[addr].pullups;
}
- if (!chips)
- return -ENODEV;
-
base = pdata->base;
}
+ if (!chips)
+ return -ENODEV;
+
data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08),
GFP_KERNEL);
if (!data)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ec82f6bff122..108e1ec2fa4b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1954,6 +1954,9 @@ struct drm_i915_cmd_table {
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
+/* ULX machines are also considered ULT. */
+#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
+ (dev)->pdev->device == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 62a5c3627b90..154b0f8bb88d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv);
bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
- if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ if (i915.enable_ppgtt == 0)
return false;
if (i915.enable_ppgtt == 1 && full)
return false;
+ return true;
+}
+
+static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
+{
+ if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
+ return 0;
+
+ if (enable_ppgtt == 1)
+ return 1;
+
+ if (enable_ppgtt == 2 && HAS_PPGTT(dev))
+ return 2;
+
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
DRM_INFO("Disabling PPGTT because VT-d is on\n");
- return false;
+ return 0;
}
#endif
- /* Full ppgtt disabled by default for now due to issues. */
- if (full)
- return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
- else
- return HAS_ALIASING_PPGTT(dev);
+ return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
#define GEN6_PPGTT_PD_ENTRIES 512
@@ -2031,6 +2041,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
gtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+ /*
+ * i915.enable_ppgtt is read-only, so do an early pass to validate the
+ * user's requested state against the hardware/driver capabilities. We
+ * do this now so that we can print out any log messages once rather
+ * than every time we check intel_enable_ppgtt().
+ */
+ i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
+ DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fa486c5fbb02..aff4a113cda3 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.edp_pps = *edp_pps;
- dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
- DP_LINK_BW_1_62;
+ switch (edp_link_params->rate) {
+ case EDP_RATE_1_62:
+ dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+ break;
+ case EDP_RATE_2_7:
+ dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+ break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
+ break;
+ }
+
switch (edp_link_params->lanes) {
- case 0:
+ case EDP_LANE_1:
dev_priv->vbt.edp_lanes = 1;
break;
- case 1:
+ case EDP_LANE_2:
dev_priv->vbt.edp_lanes = 2;
break;
- case 3:
- default:
+ case EDP_LANE_4:
dev_priv->vbt.edp_lanes = 4;
break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
+ edp_link_params->lanes);
+ break;
}
+
switch (edp_link_params->preemphasis) {
- case 0:
+ case EDP_PREEMPHASIS_NONE:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
break;
- case 1:
+ case EDP_PREEMPHASIS_3_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
break;
- case 2:
+ case EDP_PREEMPHASIS_6dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
break;
- case 3:
+ case EDP_PREEMPHASIS_9_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
+ edp_link_params->preemphasis);
+ break;
}
+
switch (edp_link_params->vswing) {
- case 0:
+ case EDP_VSWING_0_4V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
break;
- case 1:
+ case EDP_VSWING_0_6V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
break;
- case 2:
+ case EDP_VSWING_0_8V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
break;
- case 3:
+ case EDP_VSWING_1_2V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
break;
+ default:
+ DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
+ edp_link_params->vswing);
+ break;
}
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 69bcc42a0e44..48aa516a1ac0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11395,15 +11395,6 @@ void intel_modeset_init(struct drm_device *dev)
}
}
-static void
-intel_connector_break_all_links(struct intel_connector *connector)
-{
- connector->base.dpms = DRM_MODE_DPMS_OFF;
- connector->base.encoder = NULL;
- connector->encoder->connectors_active = false;
- connector->encoder->base.crtc = NULL;
-}
-
static void intel_enable_pipe_a(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -11485,8 +11476,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (connector->encoder->base.crtc != &crtc->base)
continue;
- intel_connector_break_all_links(connector);
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
}
+ /* multiple connectors may have the same encoder:
+ * handle them and break crtc link separately */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head)
+ if (connector->encoder->base.crtc == &crtc->base) {
+ connector->encoder->base.crtc = NULL;
+ connector->encoder->connectors_active = false;
+ }
WARN_ON(crtc->active);
crtc->base.enabled = false;
@@ -11568,6 +11568,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
drm_get_encoder_name(&encoder->base));
encoder->disable(encoder);
}
+ encoder->base.crtc = NULL;
+ encoder->connectors_active = false;
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
@@ -11578,8 +11580,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
base.head) {
if (connector->encoder != encoder)
continue;
-
- intel_connector_break_all_links(connector);
+ connector->base.dpms = DRM_MODE_DPMS_OFF;
+ connector->base.encoder = NULL;
}
}
/* Enabled encoders without active connectors will be fixed in
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index dfa85289f28f..2a00cb828d20 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
case DP_LINK_BW_2_7:
break;
case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
- if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) &&
+ if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
+ INTEL_INFO(dev)->gen >= 8) &&
intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
max_link_bw = DP_LINK_BW_5_4;
else
@@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
return max_link_bw;
}
+static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ u8 source_max, sink_max;
+
+ source_max = 4;
+ if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
+ (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
+ source_max = 2;
+
+ sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ return min(source_max, sink_max);
+}
+
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
@@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
}
max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
- max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+ max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
@@ -750,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = encoder->new_crtc;
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
- int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ int min_lane_count = 1;
+ int max_lane_count = intel_dp_max_lane_count(intel_dp);
/* Conveniently, the link BW constants become indices with a shift...*/
+ int min_clock = 0;
int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
int bpp, mode_rate;
static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
@@ -784,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
- dev_priv->vbt.edp_bpp < bpp) {
- DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp_bpp);
- bpp = dev_priv->vbt.edp_bpp;
+ if (is_edp(intel_dp)) {
+ if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
+ DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp_bpp);
+ bpp = dev_priv->vbt.edp_bpp;
+ }
+
+ if (IS_BROADWELL(dev)) {
+ /* Yes, it's an ugly hack. */
+ min_lane_count = max_lane_count;
+ DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
+ min_lane_count);
+ } else if (dev_priv->vbt.edp_lanes) {
+ min_lane_count = min(dev_priv->vbt.edp_lanes,
+ max_lane_count);
+ DRM_DEBUG_KMS("using min %u lanes per VBT\n",
+ min_lane_count);
+ }
+
+ if (dev_priv->vbt.edp_rate) {
+ min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
+ DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
+ bws[min_clock]);
+ }
}
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
+ for (clock = min_clock; clock <= max_clock; clock++) {
link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fce4a0d93c0b..f73ba5e6b7a8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
height);
}
+ /* No preferred mode marked by the EDID? Are there any modes? */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+ drm_get_connector_name(connector));
+ modes[i] = list_first_entry(&connector->modes,
+ struct drm_display_mode,
+ head);
+ }
+
/* last resort: use current mode */
if (!modes[i]) {
/*
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0eead16aeda7..cb8cfb7e0974 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 freq;
unsigned long flags;
+ u64 n;
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
@@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
/* scale to hardware max, but be careful to not overflow */
freq = panel->backlight.max;
- if (freq < max)
- level = level * freq / max;
- else
- level = freq / max * level;
+ n = (u64)level * freq;
+ do_div(n, max);
+ level = n;
panel->backlight.level = level;
if (panel->backlight.device)
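Why the backlight scaling above switches to 64-bit math: with the old expression the 32-bit intermediate product level * freq can wrap before the divide. A standalone sketch with illustrative values (the real freq and max come from the panel's PWM configuration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t max   = 0xFFFF;     /* user-visible maximum */
        uint32_t freq  = 3000000;    /* hardware maximum, illustrative */
        uint32_t level = 0xFFFF;     /* requested level (= max here) */

        /* old path: the 32-bit product wraps, result is far too small */
        uint32_t bad = level * freq / max;

        /* patched path: widen first (the kernel uses do_div() on a u64) */
        uint64_t n = (uint64_t)level * freq;
        uint32_t good = (uint32_t)(n / max);

        printf("32-bit: %u  64-bit: %u\n", bad, good);  /* 50835 vs 3000000 */
        return 0;
    }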
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 19e94c3edc19..d93dcf683e8c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev,
}
}
+static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
+ uint16_t wm[5], uint16_t min)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv->dev);
+
+ if (wm[0] >= min)
+ return false;
+
+ wm[0] = max(wm[0], min);
+ for (level = 1; level <= max_level; level++)
+ wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+
+ return true;
+}
+
+static void snb_wm_latency_quirk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool changed;
+
+ /*
+ * The BIOS provided WM memory latency values are often
+ * inadequate for high resolution displays. Adjust them.
+ */
+ changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
+ ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
+ ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
+
+ if (!changed)
+ return;
+
+ DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
+ intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
static void ilk_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+
+ if (IS_GEN6(dev))
+ snb_wm_latency_quirk(dev);
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
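Worked example of the SNB watermark-latency quirk above: wm[0] is raised to at least the minimum (12), and every higher level to at least DIV_ROUND_UP(12, 5) = 3. A standalone sketch with made-up starting latencies (real ones come from the BIOS):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static int increase_wm_latency(uint16_t wm[5], int max_level, uint16_t min)
    {
        if (wm[0] >= min)
            return 0;                        /* nothing to fix up */

        wm[0] = min;                         /* wm[0] < min here, so max() == min */
        for (int level = 1; level <= max_level; level++)
            if (wm[level] < DIV_ROUND_UP(min, 5))
                wm[level] = DIV_ROUND_UP(min, 5);
        return 1;
    }

    int main(void)
    {
        uint16_t pri[5] = { 2, 4, 8, 16, 32 };   /* illustrative BIOS latencies */

        if (increase_wm_latency(pri, 3, 12))     /* levels 0..3 assumed for SNB */
            for (int i = 0; i <= 3; i++)
                printf("WM%d latency: %u\n", i, pri[i]);
        return 0;
    }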
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d27155adf5db..46be00d66df3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
if (ret < 0)
goto err1;
- ret = sysfs_create_link(&encoder->ddc.dev.kobj,
- &drm_connector->kdev->kobj,
+ ret = sysfs_create_link(&drm_connector->kdev->kobj,
+ &encoder->ddc.dev.kobj,
encoder->ddc.dev.kobj.name);
if (ret < 0)
goto err2;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index f729dc71d5be..d0c75779d3f6 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 7762665ad8fd..876de9ac3793 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
}
if (outp == 8)
- return false;
+ return conf;
data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
if (data == 0x0000)
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index 1dc37b1ddbfa..b0d0fb2f4d08 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
+ mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
mmio_list(0x40800c, 0x00000000, 8, 1);
mmio_list(0x408010, 0x80000000, 0, 0);
@@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
mmio_list(0x418e24, 0x00000000, 8, 0);
mmio_list(0x418e28, 0x80000030, 0, 0);
+ mmio_list(0x4064c8, 0x018002c0, 0, 0);
+
mmio_list(0x418810, 0x80000000, 12, 2);
mmio_list(0x419848, 0x10000000, 12, 2);
mmio_list(0x419c2c, 0x10000000, 12, 2);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index fb0b6b2d1427..222e8ebb669d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
*/
i = 16;
do {
- if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55)
+ u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
+ if (data == 0xaa55)
break;
} while (i--);
@@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
goto out;
/* read entire bios image to system memory */
- bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512;
+ bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
+ bios->size = bios->size * 512;
if (!bios->size)
goto out;
bios->data = kmalloc(bios->size, GFP_KERNEL);
if (bios->data) {
- for (i = 0; i < bios->size; i+=4)
- nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i));
+ for (i = 0; i < bios->size; i += 4)
+ ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
}
/* check the PCI record header */
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
index 43fec17ea540..bbf117be572f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line)
case 0x00: return 2;
case 0x19: return 1;
case 0x1c: return 0;
+ case 0x1e: return 2;
default:
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 83face3f608f..279206997e5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev)
acpi_status status;
acpi_handle dhandle, rom_handle;
- if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected)
- return false;
-
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 3ff030dc1ee3..da764a4ed958 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
- mutex_unlock(&chan->cli->mutex);
if (ret)
goto fail_unreserve;
+ mutex_unlock(&chan->cli->mutex);
/* Update the crtc struct and cleanup */
crtc->primary->fb = fb;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index fb187c78978f..c31c12b4e666 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1177,27 +1177,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
/* Set NUM_BANKS. */
if (rdev->family >= CHIP_TAHITI) {
- unsigned tileb, index, num_banks, tile_split_bytes;
+ unsigned index, num_banks;
- /* Calculate the macrotile mode index. */
- tile_split_bytes = 64 << tile_split;
- tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
- tileb = min(tile_split_bytes, tileb);
+ if (rdev->family >= CHIP_BONAIRE) {
+ unsigned tileb, tile_split_bytes;
- for (index = 0; tileb > 64; index++) {
- tileb >>= 1;
- }
+ /* Calculate the macrotile mode index. */
+ tile_split_bytes = 64 << tile_split;
+ tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = min(tile_split_bytes, tileb);
- if (index >= 16) {
- DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
- target_fb->bits_per_pixel, tile_split);
- return -EINVAL;
- }
+ for (index = 0; tileb > 64; index++)
+ tileb >>= 1;
+
+ if (index >= 16) {
+ DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+ target_fb->bits_per_pixel, tile_split);
+ return -EINVAL;
+ }
- if (rdev->family >= CHIP_BONAIRE)
num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
- else
+ } else {
+ switch (target_fb->bits_per_pixel) {
+ case 8:
+ index = 10;
+ break;
+ case 16:
+ index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+ break;
+ default:
+ case 32:
+ index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+ break;
+ }
+
num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
+ }
+
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
/* NI and older. */
@@ -1720,8 +1736,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
/* otherwise, pick one of the plls */
if ((rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_KABINI)) {
- /* KB/KV has PPLL1 and PPLL2 */
+ (rdev->family == CHIP_KABINI) ||
+ (rdev->family == CHIP_MULLINS)) {
+ /* KB/KV/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -1885,6 +1902,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
is_tvcv = true;
+ if (!radeon_crtc->adjusted_clock)
+ return -EINVAL;
+
atombios_crtc_set_pll(crtc, adjusted_mode);
if (ASIC_IS_DCE4(rdev))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index bc0119fb6c12..54e4f52549af 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -366,11 +366,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3))
+ if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3))
+ if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
@@ -419,21 +419,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
- drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
- DP_EDP_CONFIGURATION_CAP, &tmp);
- if (tmp & 1)
- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
- else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
- (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
- panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
- else
- panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+ DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+ (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+ else
+ panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ }
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
- drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
- DP_EDP_CONFIGURATION_CAP, &tmp);
- if (tmp & 1)
- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux,
+ DP_EDP_CONFIGURATION_CAP, &tmp) == 1) {
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ }
}
return panel_mode;
@@ -809,11 +811,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
- drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp);
- if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
- dp_info.tp3_supported = true;
- else
+ if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp)
+ == 1) {
+ if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
+ dp_info.tp3_supported = true;
+ else
+ dp_info.tp3_supported = false;
+ } else {
dp_info.tp3_supported = false;
+ }
memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 199eb194716f..d2fd98968085 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
+MODULE_FIRMWARE("radeon/MULLINS_me.bin");
+MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
+MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
+MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
+MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] =
0xd80c, 0xff000ff0, 0x00000100
};
+static const u32 godavari_golden_registers[] =
+{
+ 0x55e4, 0xff607fff, 0xfc000100,
+ 0x6ed8, 0x00010101, 0x00010000,
+ 0x9830, 0xffffffff, 0x00000000,
+ 0x98302, 0xf00fffff, 0x00000400,
+ 0x6130, 0xffffffff, 0x00010000,
+ 0x5bb0, 0x000000f0, 0x00000070,
+ 0x5bc0, 0xf0311fff, 0x80300000,
+ 0x98f8, 0x73773777, 0x12010001,
+ 0x98fc, 0xffffffff, 0x00000010,
+ 0x8030, 0x00001f0f, 0x0000100a,
+ 0x2f48, 0x73773777, 0x12010001,
+ 0x2408, 0x000fffff, 0x000c007f,
+ 0x8a14, 0xf000003f, 0x00000007,
+ 0x8b24, 0xffffffff, 0x00ff0fff,
+ 0x30a04, 0x0000ff0f, 0x00000000,
+ 0x28a4c, 0x07ffffff, 0x06000000,
+ 0x4d8, 0x00000fff, 0x00000100,
+ 0xd014, 0x00010000, 0x00810001,
+ 0xd814, 0x00010000, 0x00810001,
+ 0x3e78, 0x00000001, 0x00000002,
+ 0xc768, 0x00000008, 0x00000008,
+ 0xc770, 0x00000f00, 0x00000800,
+ 0xc774, 0x00000f00, 0x00000800,
+ 0xc798, 0x00ffffff, 0x00ff7fbf,
+ 0xc79c, 0x00ffffff, 0x00ff7faf,
+ 0x8c00, 0x000000ff, 0x00000001,
+ 0x214f8, 0x01ff01ff, 0x00000002,
+ 0x21498, 0x007ff800, 0x00200000,
+ 0x2015c, 0xffffffff, 0x00000f40,
+ 0x88c4, 0x001f3ae3, 0x00000082,
+ 0x88d4, 0x0000001f, 0x00000010,
+ 0x30934, 0xffffffff, 0x00000000
+};
+
+
static void cik_init_golden_registers(struct radeon_device *rdev)
{
switch (rdev->family) {
@@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
kalindi_golden_spm_registers,
(const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
break;
+ case CHIP_MULLINS:
+ radeon_program_register_sequence(rdev,
+ kalindi_mgcg_cgcg_init,
+ (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ radeon_program_register_sequence(rdev,
+ godavari_golden_registers,
+ (const u32)ARRAY_SIZE(godavari_golden_registers));
+ radeon_program_register_sequence(rdev,
+ kalindi_golden_common_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
+ radeon_program_register_sequence(rdev,
+ kalindi_golden_spm_registers,
+ (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ break;
case CHIP_KAVERI:
radeon_program_register_sequence(rdev,
spectre_mgcg_cgcg_init,
@@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev)
rlc_req_size = KB_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
break;
+ case CHIP_MULLINS:
+ chip_name = "MULLINS";
+ pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
+ me_req_size = CIK_ME_UCODE_SIZE * 4;
+ ce_req_size = CIK_CE_UCODE_SIZE * 4;
+ mec_req_size = CIK_MEC_UCODE_SIZE * 4;
+ rlc_req_size = ML_RLC_UCODE_SIZE * 4;
+ sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ break;
default: BUG();
}
@@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_KABINI:
+ case CHIP_MULLINS:
default:
rdev->config.cik.max_shader_engines = 1;
rdev->config.cik.max_tile_pipes = 2;
@@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
@@ -5800,6 +5868,9 @@ static int cik_rlc_resume(struct radeon_device *rdev)
case CHIP_KABINI:
size = KB_RLC_UCODE_SIZE;
break;
+ case CHIP_MULLINS:
+ size = ML_RLC_UCODE_SIZE;
+ break;
}
cik_rlc_stop(rdev);
@@ -6548,6 +6619,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
buffer[count++] = cpu_to_le32(0x00000000);
break;
case CHIP_KABINI:
+ case CHIP_MULLINS:
buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
buffer[count++] = cpu_to_le32(0x00000000);
break;
@@ -6693,6 +6765,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev)
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
+ /* pflip */
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ }
/* dac hotplug */
WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
@@ -7049,6 +7134,25 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
+ if (rdev->num_crtc >= 2) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ }
+
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
@@ -7085,6 +7189,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
+ rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC0_REGISTER_OFFSET);
+ rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC1_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS +
+ EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
+
+ if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
@@ -7095,6 +7222,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
@@ -7106,6 +7239,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
}
if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
@@ -7457,6 +7596,15 @@ restart_ih:
break;
}
break;
+ case 8: /* D1 page flip */
+ case 10: /* D2 page flip */
+ case 12: /* D3 page flip */
+ case 14: /* D4 page flip */
+ case 16: /* D5 page flip */
+ case 18: /* D6 page flip */
+ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index f7e46cf682af..72e464c79a88 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 213873270d5f..dd7926394a8f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -888,6 +888,15 @@
# define DC_HPD6_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780
+/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
+#define GRPH_INT_STATUS 0x6858
+# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
+# define GRPH_PFLIP_INT_CLEAR (1 << 8)
+/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
+#define GRPH_INT_CONTROL 0x685c
+# define GRPH_PFLIP_INT_MASK (1 << 0)
+# define GRPH_PFLIP_INT_TYPE (1 << 8)
+
#define DAC_AUTODETECT_INT_CONTROL 0x67c8
#define DC_HPD1_INT_STATUS 0x601c
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index b406546440da..0f7a51a3694f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4371,7 +4371,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
- u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
u32 dma_cntl, dma_cntl1 = 0;
u32 thermal_int = 0;
@@ -4554,15 +4553,21 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
if (rdev->num_crtc >= 4) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (rdev->num_crtc >= 6) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4951,6 +4956,15 @@ restart_ih:
break;
}
break;
+ case 8: /* D1 page flip */
+ case 10: /* D2 page flip */
+ case 12: /* D3 page flip */
+ case 14: /* D4 page flip */
+ case 16: /* D5 page flip */
+ case 18: /* D6 page flip */
+ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 287fe966d7de..478caefe0fef 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 16ec9d56a234..3f6e817d97ee 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev,
return 0;
}
+static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_2bit)
+{
+ struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 i;
+
+ if (vddc_sclk_table && vddc_sclk_table->count) {
+ if (vid_2bit < vddc_sclk_table->count)
+ return vddc_sclk_table->entries[vid_2bit].v;
+ else
+ return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
+ } else {
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+ return vid_mapping_table->entries[i].vid_7bit;
+ }
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
+ }
+}
+
+static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit)
+{
+ struct radeon_clock_voltage_dependency_table *vddc_sclk_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 i;
+
+ if (vddc_sclk_table && vddc_sclk_table->count) {
+ for (i = 0; i < vddc_sclk_table->count; i++) {
+ if (vddc_sclk_table->entries[i].v == vid_7bit)
+ return i;
+ }
+ return vddc_sclk_table->count - 1;
+ } else {
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+ return vid_mapping_table->entries[i].vid_2bit;
+ }
+
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+ }
+}
+
static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
u16 voltage)
{
@@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
u32 vid_2bit)
{
struct kv_power_info *pi = kv_get_pi(rdev);
- u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
- &pi->sys_info.vid_mapping_table,
- vid_2bit);
+ u32 vid_8bit = kv_convert_vid2_to_vid7(rdev,
+ &pi->sys_info.vid_mapping_table,
+ vid_2bit);
return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
}
@@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev)
static int kv_unforce_levels(struct radeon_device *rdev)
{
- if (rdev->family == CHIP_KABINI)
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
else
return kv_set_enabled_levels(rdev);
@@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
struct radeon_uvd_clock_voltage_dependency_table *table =
&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
int ret;
+ u32 mask;
if (!gate) {
- if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+ if (table->count)
pi->uvd_boot_level = table->count - 1;
else
pi->uvd_boot_level = 0;
+ if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
+ mask = 1 << pi->uvd_boot_level;
+ } else {
+ mask = 0x1f;
+ }
+
ret = kv_copy_bytes_to_smc(rdev,
pi->dpm_table_start +
offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
@@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
if (ret)
return ret;
- if (!pi->caps_uvd_dpm ||
- pi->caps_stable_p_state)
- kv_send_msg_to_smc_with_parameter(rdev,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (1 << pi->uvd_boot_level));
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ mask);
}
return kv_enable_uvd_dpm(rdev, !gate);
@@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
if (pi->acp_power_gated == gate)
return;
- if (rdev->family == CHIP_KABINI)
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
return;
pi->acp_power_gated = gate;
@@ -1786,7 +1837,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
}
}
- if (rdev->family == CHIP_KABINI) {
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
if (pi->enable_dpm) {
kv_set_valid_clock_range(rdev, new_ps);
kv_update_dfs_bypass_settings(rdev, new_ps);
@@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
kv_update_sclk_t(rdev);
+ if (rdev->family == CHIP_MULLINS)
+ kv_enable_nb_dpm(rdev);
}
} else {
if (pi->enable_dpm) {
@@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
- if (rdev->family == CHIP_KABINI) {
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
kv_force_lowest_valid(rdev);
kv_init_graphics_levels(rdev);
kv_program_bootup_state(rdev);
@@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
static void kv_patch_voltage_values(struct radeon_device *rdev)
{
int i;
- struct radeon_uvd_clock_voltage_dependency_table *table =
+ struct radeon_uvd_clock_voltage_dependency_table *uvd_table =
&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct radeon_vce_clock_voltage_dependency_table *vce_table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table *samu_table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table *acp_table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
- if (table->count) {
- for (i = 0; i < table->count; i++)
- table->entries[i].v =
+ if (uvd_table->count) {
+ for (i = 0; i < uvd_table->count; i++)
+ uvd_table->entries[i].v =
kv_convert_8bit_index_to_voltage(rdev,
- table->entries[i].v);
+ uvd_table->entries[i].v);
+ }
+
+ if (vce_table->count) {
+ for (i = 0; i < vce_table->count; i++)
+ vce_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(rdev,
+ vce_table->entries[i].v);
+ }
+
+ if (samu_table->count) {
+ for (i = 0; i < samu_table->count; i++)
+ samu_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(rdev,
+ samu_table->entries[i].v);
+ }
+
+ if (acp_table->count) {
+ for (i = 0; i < acp_table->count; i++)
+ acp_table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(rdev,
+ acp_table->entries[i].v);
}
}
@@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev)
break;
}
- if (rdev->family == CHIP_KABINI)
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
else
return kv_set_enabled_level(rdev, i);
@@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev)
break;
}
- if (rdev->family == CHIP_KABINI)
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
else
return kv_set_enabled_level(rdev, i);
@@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
else
pi->battery_state = false;
- if (rdev->family == CHIP_KABINI) {
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
ps->dpm0_pg_nb_ps_lo = 0x1;
ps->dpm0_pg_nb_ps_hi = 0x0;
ps->dpmx_nb_ps_lo = 0x1;
@@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
if (pi->lowest_valid > pi->highest_valid)
return -EINVAL;
- if (rdev->family == CHIP_KABINI) {
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
pi->graphics_level[i].GnbSlow = 1;
pi->graphics_level[i].ForceNbPs1 = 0;
@@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev)
break;
kv_set_divider_value(rdev, i, table->entries[i].clk);
- vid_2bit = sumo_convert_vid7_to_vid2(rdev,
- &pi->sys_info.vid_mapping_table,
- table->entries[i].v);
+ vid_2bit = kv_convert_vid7_to_vid2(rdev,
+ &pi->sys_info.vid_mapping_table,
+ table->entries[i].v);
kv_set_vid(rdev, i, vid_2bit);
kv_set_at(rdev, i, pi->at[i]);
kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
@@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev,
struct kv_power_info *pi = kv_get_pi(rdev);
u32 nbdpmconfig1;
- if (rdev->family == CHIP_KABINI)
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
return;
if (pi->sys_info.nb_dpm_enable) {
@@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->sram_end = SMC_RAM_END;
- if (rdev->family == CHIP_KABINI)
- pi->high_voltage_t = 4001;
-
pi->enable_nb_dpm = true;
pi->caps_power_containment = true;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6e887d004eba..bbc189fd3ddc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
@@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
- u32 d1grph = 0, d2grph = 0;
u32 dma_cntl;
u32 thermal_int = 0;
@@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
- WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
- WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3918,6 +3918,14 @@ restart_ih:
break;
}
break;
+ case 9: /* D1 pflip */
+ DRM_DEBUG("IH: D1 flip\n");
+ radeon_crtc_handle_flip(rdev, 0);
+ break;
+ case 11: /* D2 pflip */
+ DRM_DEBUG("IH: D2 flip\n");
+ radeon_crtc_handle_flip(rdev, 1);
+ break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 53fcb28f5578..4969cef44a19 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b58e1afdda76..8149e7cf4303 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -730,6 +730,12 @@ struct cik_irq_stat_regs {
u32 disp_int_cont4;
u32 disp_int_cont5;
u32 disp_int_cont6;
+ u32 d1grph_int;
+ u32 d2grph_int;
+ u32 d3grph_int;
+ u32 d4grph_int;
+ u32 d5grph_int;
+ u32 d6grph_int;
};
union radeon_irq_stat_regs {
@@ -1636,6 +1642,7 @@ struct radeon_vce {
unsigned fb_version;
atomic_t handles[RADEON_MAX_VCE_HANDLES];
struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
+ unsigned img_size[RADEON_MAX_VCE_HANDLES];
struct delayed_work idle_work;
};
@@ -1649,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi);
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
int radeon_vce_cs_parse(struct radeon_cs_parser *p);
bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
@@ -2634,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI))
+#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
+ (rdev->family == CHIP_MULLINS))
#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
(rdev->ddev->pdev->device == 0x6850) || \
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index b8a24a75d4ff..be20e62dac83 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2516,6 +2516,7 @@ int radeon_asic_init(struct radeon_device *rdev)
break;
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
rdev->asic = &kv_asic;
/* set num crtcs */
if (rdev->family == CHIP_KAVERI) {
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index b3633d9a5317..9ab30976287d 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
}
+ if (!found) {
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (!ACPI_FAILURE(status)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
if (!found)
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 511fe26198e4..0e770bbf7e29 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = {
"KAVERI",
"KABINI",
"HAWAII",
+ "MULLINS",
"LAST",
};
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8d99d5ee8014..f00dbbf4d806 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -284,6 +284,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
u32 update_pending;
int vpos, hpos;
+ /* can happen during initialization */
+ if (radeon_crtc == NULL)
+ return;
+
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
@@ -826,14 +830,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
/* make sure nominator is large enough */
if (*nom < nom_min) {
- tmp = (nom_min + *nom - 1) / *nom;
+ tmp = DIV_ROUND_UP(nom_min, *nom);
*nom *= tmp;
*den *= tmp;
}
/* make sure the denominator is large enough */
if (*den < den_min) {
- tmp = (den_min + *den - 1) / *den;
+ tmp = DIV_ROUND_UP(den_min, *den);
*nom *= tmp;
*den *= tmp;
}
@@ -858,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = min(210 / post_div, ref_div_max);
+ ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -993,6 +997,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
/* this also makes sure that the reference divider is large enough */
avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
+ /* avoid high jitter with small fractional dividers */
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
+ fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
+ if (fb_div < fb_div_min) {
+ unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
+ fb_div *= tmp;
+ ref_div *= tmp;
+ }
+ }
+
/* and finally save the result */
if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
*fb_div_p = fb_div / 10;
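The avivo_reduce_ratio() hunk above swaps an open-coded round-up division for DIV_ROUND_UP() and scales numerator and denominator by the same factor, so the ratio itself never changes. A minimal user-space sketch of just that scaling step (the macro is re-created locally and the minimum values are invented; none of this is the driver's code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Scale nom/den by a common factor until both reach their minimums,
 * keeping the nom/den ratio unchanged. */
static void reduce_ratio(unsigned int *nom, unsigned int *den,
			 unsigned int nom_min, unsigned int den_min)
{
	unsigned int tmp;

	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

int main(void)
{
	unsigned int nom = 3, den = 2;

	reduce_ratio(&nom, &den, 10, 1);
	printf("scaled to %u/%u (same ratio as 3/2)\n", nom, den);
	return 0;
}

The substitution itself is equivalent arithmetic, since DIV_ROUND_UP(a, b) expands to (a + b - 1) / b; it only states the intent more clearly.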
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 9da5da4ffd17..4b7b87f71a63 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -97,6 +97,7 @@ enum radeon_family {
CHIP_KAVERI,
CHIP_KABINI,
CHIP_HAWAII,
+ CHIP_MULLINS,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 0cc47f12d995..eaaedba04675 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return r;
}
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
- }
+ if (rdev->accel_working) {
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
- /* map the ib pool buffer read only into
- * virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
- RADEON_VM_PAGE_READABLE |
- RADEON_VM_PAGE_SNOOPED);
+ /* map the ib pool buffer read only into
+ * virtual address space */
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ RADEON_VM_PAGE_READABLE |
+ RADEON_VM_PAGE_SNOOPED);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
- if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
- kfree(fpriv);
- return r;
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
}
-
file_priv->driver_priv = fpriv;
}
@@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
struct radeon_bo_va *bo_va;
int r;
- r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
- if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
- radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ if (rdev->accel_working) {
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (!r) {
+ bo_va = radeon_vm_bo_find(&fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ if (bo_va)
+ radeon_vm_bo_rmv(rdev, bo_va);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+ }
}
radeon_vm_fini(rdev, &fpriv->vm);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 19bec0dbfa38..4faa4d6f9bb4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
* into account. We don't want to disallow buffer moves
* completely.
*/
- if (current_domain != RADEON_GEM_DOMAIN_CPU &&
+ if ((lobj->alt_domain & current_domain) != 0 &&
(domain & current_domain) == 0 && /* will be moved */
bytes_moved > bytes_moved_threshold) {
/* don't move it */
@@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 0);
rdev = rbo->rdev;
- if (bo->mem.mem_type == TTM_PL_VRAM) {
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) > rdev->mc.visible_vram_size) {
- /* hurrah the memory is not visible ! */
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- r = ttm_bo_validate(bo, &rbo->placement, false, false);
- if (unlikely(r != 0))
- return r;
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should not happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
- }
+ if (bo->mem.mem_type != TTM_PL_VRAM)
+ return 0;
+
+ size = bo->mem.num_pages << PAGE_SHIFT;
+ offset = bo->mem.start << PAGE_SHIFT;
+ if ((offset + size) <= rdev->mc.visible_vram_size)
+ return 0;
+
+ /* hurrah the memory is not visible ! */
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ if (unlikely(r == -ENOMEM)) {
+ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+ return ttm_bo_validate(bo, &rbo->placement, false, false);
+ } else if (unlikely(r != 0)) {
+ return r;
}
+
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fac8efe8340..53d6e1bb48dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set profile when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
@@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set method when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ count = -EINVAL;
+ goto fail;
+ }
+
/* we don't support the legacy modes with dpm */
if (rdev->pm.pm_method == PM_METHOD_DPM) {
count = -EINVAL;
@@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
@@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
+ /* Can't set dpm state when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
@@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
@@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
enum radeon_dpm_forced_level level;
int ret = 0;
+ /* Can't force performance level when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
mutex_lock(&rdev->pm.mutex);
if (strncmp("low", buf, strlen("low")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_LOW;
@@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
char *buf)
{
struct radeon_device *rdev = dev_get_drvdata(dev);
+ struct drm_device *ddev = rdev->ddev;
int temp;
+ /* Can't get temperature when the card is off */
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (rdev->asic->pm.get_temperature)
temp = radeon_get_temperature(rdev);
else
@@ -1300,6 +1336,7 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_KABINI:
case CHIP_KAVERI:
case CHIP_HAWAII:
+ case CHIP_MULLINS:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1613,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct drm_device *ddev = rdev->ddev;
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->flags & RADEON_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ seq_printf(m, "PX asic powered off\n");
+ } else if (rdev->pm.dpm_enabled) {
mutex_lock(&rdev->pm.mutex);
if (rdev->asic->dpm.debugfs_print_current_performance_level)
radeon_dpm_debugfs_print_current_performance_level(rdev, m);
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 58d12938c0b8..4e7c3269b183 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -52,6 +52,7 @@
#define BONAIRE_RLC_UCODE_SIZE 2048
#define KB_RLC_UCODE_SIZE 2560
#define KV_RLC_UCODE_SIZE 2560
+#define ML_RLC_UCODE_SIZE 2560
/* MC */
#define BTC_MC_UCODE_SIZE 6024
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 5748bdaeacce..1b65ae2433cd 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_KABINI:
case CHIP_KAVERI:
case CHIP_HAWAII:
+ case CHIP_MULLINS:
fw_name = FIRMWARE_BONAIRE;
break;
@@ -465,6 +466,10 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
cmd = radeon_get_ib_value(p, p->idx) >> 1;
if (cmd < 0x4) {
+ if (end <= start) {
+ DRM_ERROR("invalid reloc offset %X!\n", offset);
+ return -EINVAL;
+ }
if ((end - start) < buf_sizes[cmd]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
(unsigned)(end - start), buf_sizes[cmd]);
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index ced53dd03e7c..3971d968af6c 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev)
case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
+ case CHIP_MULLINS:
fw_name = FIRMWARE_BONAIRE;
break;
@@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
* @p: parser context
* @lo: address of lower dword
* @hi: address of higher dword
+ * @size: minimum size the relocation buffer must have
*
* Patch relocation inside command stream with real buffer address
*/
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
+int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
+ unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
- uint64_t offset;
+ struct radeon_cs_reloc *reloc;
+ uint64_t start, end, offset;
unsigned idx;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
@@ -461,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
return -EINVAL;
}
- offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
+ reloc = p->relocs_ptr[(idx / 4)];
+ start = reloc->gpu_offset;
+ end = start + radeon_bo_size(reloc->robj);
+ start += offset;
- p->ib.ptr[lo] = offset & 0xFFFFFFFF;
- p->ib.ptr[hi] = offset >> 32;
+ p->ib.ptr[lo] = start & 0xFFFFFFFF;
+ p->ib.ptr[hi] = start >> 32;
+
+ if (end <= start) {
+ DRM_ERROR("invalid reloc offset %llX!\n", offset);
+ return -EINVAL;
+ }
+ if ((end - start) < size) {
+ DRM_ERROR("buffer too small (%d / %d)!\n",
+ (unsigned)(end - start), size);
+ return -EINVAL;
+ }
return 0;
}
/**
+ * radeon_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ *
+ * Validates the handle and returns the found session index or -EINVAL
+ * if we don't have another free session index.
+ */
+int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+{
+ unsigned i;
+
+ /* validate the handle */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+ return i;
+ }
+
+ /* handle not found try to alloc a new one */
+ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
+ if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
+ p->rdev->vce.filp[i] = p->filp;
+ p->rdev->vce.img_size[i] = 0;
+ return i;
+ }
+ }
+
+ DRM_ERROR("No more free VCE handles!\n");
+ return -EINVAL;
+}
+
+/**
* radeon_vce_cs_parse - parse and validate the command stream
*
* @p: parser context
@@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
*/
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
- uint32_t handle = 0;
- bool destroy = false;
+ int session_idx = -1;
+ bool destroyed = false;
+ uint32_t tmp, handle = 0;
+ uint32_t *size = &tmp;
int i, r;
while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
@@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
+ if (destroyed) {
+ DRM_ERROR("No other command allowed after destroy!\n");
+ return -EINVAL;
+ }
+
switch (cmd) {
case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2);
+ session_idx = radeon_vce_validate_handle(p, handle);
+ if (session_idx < 0)
+ return session_idx;
+ size = &p->rdev->vce.img_size[session_idx];
break;
case 0x00000002: // task info
+ break;
+
case 0x01000001: // create
+ *size = radeon_get_ib_value(p, p->idx + 8) *
+ radeon_get_ib_value(p, p->idx + 10) *
+ 8 * 3 / 2;
+ break;
+
case 0x04000001: // config extension
case 0x04000002: // pic control
case 0x04000005: // rate control
@@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break;
case 0x03000001: // encode
- r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
+ r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
+ *size);
if (r)
return r;
- r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
+ r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
+ *size / 3);
if (r)
return r;
break;
case 0x02000001: // destroy
- destroy = true;
+ destroyed = true;
break;
case 0x05000001: // context buffer
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ *size * 2);
+ if (r)
+ return r;
+ break;
+
case 0x05000004: // video bitstream buffer
+ tmp = radeon_get_ib_value(p, p->idx + 4);
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ tmp);
+ if (r)
+ return r;
+ break;
+
case 0x05000005: // feedback buffer
- r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
+ r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
+ 4096);
if (r)
return r;
break;
@@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
+ if (session_idx == -1) {
+ DRM_ERROR("no session command at start of IB\n");
+ return -EINVAL;
+ }
+
p->idx += len / 4;
}
- if (destroy) {
+ if (destroyed) {
/* IB contains a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
-
- return 0;
- }
-
- /* create or encode, validate the handle */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
- return 0;
}
- /* handle not found try to alloc a new one */
- for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
- p->rdev->vce.filp[i] = p->filp;
- return 0;
- }
- }
-
- DRM_ERROR("No more free VCE handles!\n");
- return -EINVAL;
+ return 0;
}
/**
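radeon_vce_validate_handle() above first looks for a slot that already holds the handle and otherwise claims a free slot with atomic_cmpxchg(), so concurrent parsers cannot both grab the same empty slot. A rough user-space analogue of that claim-a-free-slot pattern using C11 atomics (slot count, names and the demo handle are invented for the example):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static atomic_uint handles[MAX_HANDLES];	/* zero means "slot free" */

/* Return the slot already holding 'handle', or claim a free slot for it. */
static int validate_handle(uint32_t handle)
{
	unsigned int expected;
	int i;

	for (i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;

	for (i = 0; i < MAX_HANDLES; ++i) {
		expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;	/* we won the race for this slot */
	}
	return -1;	/* no free slot left */
}

int main(void)
{
	printf("slot for 0x42: %d\n", validate_handle(0x42));
	printf("slot for 0x42 again: %d\n", validate_handle(0x42));
	return 0;
}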
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2aae6ce49d32..d9ab99f47612 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 12;
+ ndw += vm->max_pde_used * 16;
/* update too big for an IB */
if (ndw > 0xfffff)
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index aca8cbe8a335..bbf2e076ee45 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ac708e006180..22a63c98ba14 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5780,7 +5780,6 @@ int si_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
- u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 dma_cntl, dma_cntl1;
u32 thermal_int = 0;
@@ -5919,16 +5918,22 @@ int si_irq_set(struct radeon_device *rdev)
}
if (rdev->num_crtc >= 2) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (rdev->num_crtc >= 4) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (rdev->num_crtc >= 6) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ GRPH_PFLIP_INT_MASK);
}
if (!ASIC_IS_NODCE(rdev)) {
@@ -6292,6 +6297,15 @@ restart_ih:
break;
}
break;
+ case 8: /* D1 page flip */
+ case 10: /* D2 page flip */
+ case 12: /* D3 page flip */
+ case 14: /* D4 page flip */
+ case 16: /* D5 page flip */
+ case 18: /* D6 page flip */
+ DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index cf0fdad8c278..de0ca070122f 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -213,6 +213,7 @@ int si_copy_dma(struct radeon_device *rdev,
r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
return r;
}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 683532f84931..7321283602ce 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -107,8 +107,8 @@
#define SPLL_CHG_STATUS (1 << 1)
#define SPLL_CNTL_MODE 0x618
#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 8)
-# define SPLL_REFCLK_SEL_MASK 0xFF00
+# define SPLL_REFCLK_SEL(x) ((x) << 26)
+# define SPLL_REFCLK_SEL_MASK (3 << 26)
#define CG_SPLL_SPREAD_SPECTRUM 0x620
#define SSEN (1 << 0)
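The sid.h correction above turns SPLL_REFCLK_SEL into a two-bit field at bit 26 with a matching mask. As a generic illustration of how such shift/mask pairs are used in a read-modify-write (the names and the register value below are placeholders, not SI hardware definitions):

#include <stdio.h>

#define FIELD_SHIFT	26
#define FIELD_SEL(x)	((unsigned int)(x) << FIELD_SHIFT)
#define FIELD_SEL_MASK	(3u << FIELD_SHIFT)

int main(void)
{
	unsigned int reg = 0xdeadbeef;

	/* read-modify-write: clear the 2-bit field, then insert the new value */
	reg = (reg & ~FIELD_SEL_MASK) | FIELD_SEL(2);

	printf("field value now: %u\n", (reg & FIELD_SEL_MASK) >> FIELD_SHIFT);
	return 0;
}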
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 0a243f0e5d68..be42c8125203 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev)
int r;
/* raise clocks while booting up the VCPU */
- radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (rdev->family < CHIP_RV740)
+ radeon_set_uvd_clocks(rdev, 10000, 10000);
+ else
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
r = uvd_v1_0_start(rdev);
if (r)
@@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
struct radeon_fence *fence = NULL;
int r;
- r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (rdev->family < CHIP_RV740)
+ r = radeon_set_uvd_clocks(rdev, 10000, 10000);
+ else
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
if (r) {
DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
return r;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 10a2c0866459..da52279de939 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1253,7 +1253,8 @@ EXPORT_SYMBOL_GPL(hid_output_report);
static int hid_report_len(struct hid_report *report)
{
- return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+ /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ return ((report->size - 1) >> 3) + 1 + (report->id > 0);
}
/*
@@ -1266,7 +1267,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
* of implement() working on 8 byte chunks
*/
- int len = hid_report_len(report);
+ int len = hid_report_len(report) + 7;
return kmalloc(len, flags);
}
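With the hid-core.c change, hid_report_len() returns just the report length in bytes (bits rounded up, plus one byte when a report ID is present) and the 7-byte slack needed by implement() moves into hid_alloc_report_buf(). A quick stand-alone check that the open-coded expression really is DIV_ROUND_UP(size, 8) for any non-zero bit count (the macro is re-created locally purely for the test):

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int size;

	/* ((size - 1) >> 3) + 1 rounds a bit count up to whole bytes */
	for (size = 1; size <= 4096; size++)
		assert(((size - 1) >> 3) + 1 == DIV_ROUND_UP(size, 8));

	printf("round-up-to-bytes expression verified for 1..4096 bits\n");
	return 0;
}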
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c8af7202c28d..34bb2205d2ea 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -301,6 +301,9 @@
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
+#define USB_VENDOR_ID_ELITEGROUP 0x03fc
+#define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8
+
#define USB_VENDOR_ID_ELO 0x04E7
#define USB_DEVICE_ID_ELO_TS2515 0x0022
#define USB_DEVICE_ID_ELO_TS2700 0x0020
@@ -834,6 +837,10 @@
#define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
+
+#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
+#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
#define USB_VENDOR_ID_THINGM 0x27b8
#define USB_DEVICE_ID_BLINK1 0x01ed
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 35278e43c7a4..51e25b9407f2 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1155,6 +1155,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+ /* Elitegroup panel */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
+ USB_DEVICE_ID_ELITEGROUP_05D8) },
+
/* Flatfrog Panels */
{ .driver_data = MT_CLS_FLATFROG,
MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index af8244b1c1f4..be14b5690e94 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -708,6 +708,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
USB_DEVICE_ID_STM_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
+ USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dbd83878ff99..8e4ddb369883 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -119,6 +119,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
{ 0, 0 }
};
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 90ec1173b8a1..01723f04fe45 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev,
if (retval < 0)
goto fail;
- hyst = val - retval * 1000;
+ hyst = retval * 1000 - val;
hyst = DIV_ROUND_CLOSEST(hyst, 1000);
if (hyst < 0 || hyst > 255) {
retval = -ERANGE;
@@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client,
}
id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
- if (id != 0x01)
+ if (id < 0x01 || id > 0x04)
return -ENODEV;
return 0;
@@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client,
if (id->driver_data)
data->groups[1] = &emc1404_group;
- hwmon_dev = hwmon_device_register_with_groups(&client->dev,
- client->name, data,
- data->groups);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+ client->name, data,
+ data->groups);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
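The store_hyst() fix reverses the subtraction: the hysteresis register stores how far below the limit the hysteresis point lies, so the value written must be the limit minus the requested temperature. A tiny sketch of the corrected arithmetic with invented numbers (DIV_ROUND_CLOSEST is re-created locally and is only valid for the positive values used here):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int limit_mC = 85000;		/* limit read back from the chip, in millidegrees */
	int requested_mC = 81000;	/* value written to the hyst sysfs file */
	int hyst = DIV_ROUND_CLOSEST(limit_mC - requested_mC, 1000);

	printf("hysteresis register value: %d degC below the limit\n", hyst);
	return 0;
}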
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 22e92c3d3d07..3c20e4bd6dd1 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
*/
dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
+ /* enforce disabled interrupts (due to HW issues) */
+ i2c_dw_disable_int(dev);
+
/* Enable the adapter */
__i2c_dw_enable(dev, true);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 28cbe1b2a2ec..32c85e9ecdae 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
resource_size(&adev->res));
- if (IS_ERR(dev->virtbase)) {
+ if (!dev->virtbase) {
ret = -ENOMEM;
goto err_no_mem;
}
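The i2c-nomadik fix is needed because devm_ioremap() signals failure with a plain NULL rather than an ERR_PTR()-encoded errno, so the previous IS_ERR() check could never fire. A stand-alone sketch of why (ERR_PTR/IS_ERR are re-implemented here in the usual way purely for the demonstration):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *err = ERR_PTR(-12);	/* errno-style failure encoded in the pointer */
	void *nul = NULL;		/* how a NULL-returning mapping API fails */

	printf("IS_ERR(err) = %ld, IS_ERR(NULL) = %ld\n", IS_ERR(err), IS_ERR(nul));
	printf("a NULL-returning API must therefore be checked with !ptr\n");
	return 0;
}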
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 1b4cf14f1106..2a5efb5b487c 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
int ret, idx;
ret = pm_runtime_get_sync(qup->dev);
- if (ret)
+ if (ret < 0)
goto out;
writel(1, qup->base + QUP_SW_RESET);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d4fa8eba6e9d..06d47aafbb79 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EINVAL;
for (i = 0; i < num; i++) {
+ /* This HW can't send STOP after address phase */
+ if (msgs[i].len == 0) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
/*-------------- spin lock -----------------*/
spin_lock_irqsave(&priv->lock, flags);
@@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
static u32 rcar_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ /* This HW can't do SMBUS_QUICK and NOSTART */
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
}
static const struct i2c_algorithm rcar_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index ae4491062e41..bb3a9964f7e0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
- i2c->suspended = 0;
clk_prepare_enable(i2c->clk);
s3c24xx_i2c_init(i2c);
clk_disable_unprepare(i2c->clk);
+ i2c->suspended = 0;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1b6dbe156a37..199c7896f081 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -48,6 +48,7 @@
#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include "mlx4_ib.h"
#include "user.h"
@@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
}
#endif
+#define MLX4_IB_INVALID_MAC ((u64)-1)
+static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ int port)
+{
+ u64 new_smac = 0;
+ u64 release_mac = MLX4_IB_INVALID_MAC;
+ struct mlx4_ib_qp *qp;
+
+ read_lock(&dev_base_lock);
+ new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ read_unlock(&dev_base_lock);
+
+ mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
+ qp = ibdev->qp1_proxy[port - 1];
+ if (qp) {
+ int new_smac_index;
+ u64 old_smac = qp->pri.smac;
+ struct mlx4_update_qp_params update_params;
+
+ if (new_smac == old_smac)
+ goto unlock;
+
+ new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
+
+ if (new_smac_index < 0)
+ goto unlock;
+
+ update_params.smac_index = new_smac_index;
+ if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+ &update_params)) {
+ release_mac = new_smac;
+ goto unlock;
+ }
+
+ qp->pri.smac = new_smac;
+ qp->pri.smac_index = new_smac_index;
+
+ release_mac = old_smac;
+ }
+
+unlock:
+ mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
+ if (release_mac != MLX4_IB_INVALID_MAC)
+ mlx4_unregister_mac(ibdev->dev, port, release_mac);
+}
+
static void mlx4_ib_get_dev_addr(struct net_device *dev,
struct mlx4_ib_dev *ibdev, u8 port)
{
@@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
return 0;
}
-static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
+static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev,
+ unsigned long event)
+
{
struct mlx4_ib_iboe *iboe;
+ int update_qps_port = -1;
int port;
iboe = &ibdev->iboe;
@@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
curr_master = iboe->masters[port - 1];
+ if (dev == iboe->netdevs[port - 1] &&
+ (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
+ event == NETDEV_UP || event == NETDEV_CHANGE))
+ update_qps_port = port;
+
if (curr_netdev) {
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
@@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
}
spin_unlock(&iboe->lock);
+
+ if (update_qps_port > 0)
+ mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
@@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, dev, event);
return NOTIFY_DONE;
}
@@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_map;
for (i = 0; i < ibdev->num_ports; ++i) {
+ mutex_init(&ibdev->qp1_proxy_lock[i]);
if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
IB_LINK_LAYER_ETHERNET) {
err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
@@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (i = 1 ; i <= ibdev->num_ports ; ++i)
reset_gid_table(ibdev, i);
rtnl_lock();
- mlx4_ib_scan_netdevs(ibdev);
+ mlx4_ib_scan_netdevs(ibdev, NULL, 0);
rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f589522fddfd..66b0b7dbd9f4 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -522,6 +522,9 @@ struct mlx4_ib_dev {
int steer_qpn_count;
int steer_qpn_base;
int steering_support;
+ struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
+ /* lock when destroying qp1_proxy and getting netdev events */
+ struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
};
struct ib_event_work {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 41308af4163c..dc57482ae7af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp)
if (is_qp0(dev, mqp))
mlx4_CLOSE_PORT(dev->dev, mqp->port);
+ if (dev->qp1_proxy[mqp->port - 1] == mqp) {
+ mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ dev->qp1_proxy[mqp->port - 1] = NULL;
+ mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
+ }
+
pd = get_pd(mqp);
destroy_qp_common(dev, mqp, !!pd->ibpd.uobject);
@@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
if (err)
return -EINVAL;
+ if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
+ dev->qp1_proxy[qp->port - 1] = qp;
}
}
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index c98fdb185931..a1710465faaf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -28,6 +28,7 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
+#include <linux/semaphore.h>
#include "isert_proto.h"
#include "ib_isert.h"
@@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
struct isert_device *device;
struct ib_device *ib_dev = cma_id->device;
int ret = 0;
- u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
+ u8 pi_support;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ pr_debug("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
cma_id, cma_id->context);
@@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_mr;
}
+ pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
if (pi_support && !device->pi_capable) {
pr_err("Protection information requested but not supported\n");
ret = -EINVAL;
@@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_conn_dev;
mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node);
+ list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
- pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
- wake_up(&isert_np->np_accept_wq);
+ pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+ up(&isert_np->np_sem);
return 0;
out_conn_dev:
@@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np,
pr_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
- init_waitqueue_head(&isert_np->np_accept_wq);
+ sema_init(&isert_np->np_sem, 0);
mutex_init(&isert_np->np_accept_mutex);
INIT_LIST_HEAD(&isert_np->np_accept_list);
init_completion(&isert_np->np_login_comp);
@@ -3048,18 +3058,6 @@ out:
}
static int
-isert_check_accept_queue(struct isert_np *isert_np)
-{
- int empty;
-
- mutex_lock(&isert_np->np_accept_mutex);
- empty = list_empty(&isert_np->np_accept_list);
- mutex_unlock(&isert_np->np_accept_mutex);
-
- return empty;
-}
-
-static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
@@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
int max_accept = 0, ret;
accept_wait:
- ret = wait_event_interruptible(isert_np->np_accept_wq,
- !isert_check_accept_queue(isert_np) ||
- np->np_thread_state == ISCSI_NP_THREAD_RESET);
+ ret = down_interruptible(&isert_np->np_sem);
if (max_accept > 5)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+ pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 4c072ae34c01..da6612e68000 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -182,7 +182,7 @@ struct isert_device {
};
struct isert_np {
- wait_queue_head_t np_accept_wq;
+ struct semaphore np_sem;
struct rdma_cm_id *np_cm_id;
struct mutex np_accept_mutex;
struct list_head np_accept_list;
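The isert conversion above replaces the accept wait queue with a counting semaphore: each queued connect request does one up(), and isert_accept_np() consumes one pending request per down_interruptible(). A loose user-space analogue using POSIX semaphores (sem_t stands in for the kernel's struct semaphore; everything else is invented for the example):

#include <semaphore.h>
#include <stdio.h>

int main(void)
{
	sem_t np_sem;

	sem_init(&np_sem, 0, 0);	/* counts pending connect requests */

	sem_post(&np_sem);		/* a connect request arrives */
	sem_post(&np_sem);		/* and another */

	/* the accept side dequeues exactly one request per successful down */
	while (sem_trywait(&np_sem) == 0)
		printf("accepted one pending connection\n");

	sem_destroy(&np_sem);
	return 0;
}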
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 2626773ff29b..2dd1d0dd4f7d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data);
static void *atkbd_platform_fixup_data;
static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int);
+/*
+ * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding
+ * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed.
+ */
+static bool atkbd_skip_deactivate;
+
static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
ssize_t (*handler)(struct atkbd *, char *));
static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd)
* Make sure nothing is coming from the keyboard and disturbs our
* internal state.
*/
- atkbd_deactivate(atkbd);
+ if (!atkbd_skip_deactivate)
+ atkbd_deactivate(atkbd);
return 0;
}
@@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id)
return 1;
}
+static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id)
+{
+ atkbd_skip_deactivate = true;
+ return 1;
+}
+
static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
{
.matches = {
@@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = {
.callback = atkbd_setup_scancode_fixup,
.driver_data = atkbd_oqo_01plus_scancode_fixup,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"),
+ },
+ .callback = atkbd_deactivate_fixup,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"),
+ },
+ .callback = atkbd_deactivate_fixup,
+ },
{ }
};
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 55c15304ddbc..4e491c1762cf 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -392,6 +392,13 @@ static const struct of_device_id tca8418_dt_ids[] = {
{ }
};
MODULE_DEVICE_TABLE(of, tca8418_dt_ids);
+
+/*
+ * The device tree based i2c loader looks for
+ * "i2c:" + second_component_of(property("compatible"))
+ * and therefore we need an alias to be found.
+ */
+MODULE_ALIAS("i2c:tca8418");
#endif
static struct i2c_driver tca8418_keypad_driver = {
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 52d3a9b28f0b..b36831c828d3 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -70,6 +70,7 @@
#define BMA150_CFG_5_REG 0x11
#define BMA150_CHIP_ID 2
+#define BMA180_CHIP_ID 3
#define BMA150_CHIP_ID_REG BMA150_DATA_0_REG
#define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG
@@ -539,7 +540,7 @@ static int bma150_probe(struct i2c_client *client,
}
chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG);
- if (chip_id != BMA150_CHIP_ID) {
+ if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) {
dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id);
return -EINVAL;
}
@@ -643,6 +644,7 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL);
static const struct i2c_device_id bma150_id[] = {
{ "bma150", 0 },
+ { "bma180", 0 },
{ "smb380", 0 },
{ "bma023", 0 },
{ }
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 088d3541c7d3..b96e978a37b7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -11,6 +11,7 @@
*/
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/input.h>
@@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse)
break;
case 3:
- etd->reg_10 = 0x0b;
+ if (etd->set_hw_resolution)
+ etd->reg_10 = 0x0b;
+ else
+ etd->reg_10 = 0x03;
+
if (elantech_write_reg(psmouse, 0x10, etd->reg_10))
rc = -1;
@@ -1331,6 +1336,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
}
/*
+ * Some hw_version 3 models go into error state when we try to set bit 3 of r10
+ */
+static const struct dmi_system_id no_hw_res_dmi_table[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Gigabyte U2442 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U2442"),
+ },
+ },
+#endif
+ { }
+};
+
+/*
* determine hardware version and set some properties according to it.
*/
static int elantech_set_properties(struct elantech_data *etd)
@@ -1390,6 +1411,9 @@ static int elantech_set_properties(struct elantech_data *etd)
*/
etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+ /* Enable real hardware resolution on hw_version 3 ? */
+ etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
+
return 0;
}
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 036a04abaef7..9e0e2a1f340d 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -130,6 +130,7 @@ struct elantech_data {
bool jumpy_cursor;
bool reports_pressure;
bool crc_enabled;
+ bool set_hw_resolution;
unsigned char hw_version;
unsigned int fw_version;
unsigned int single_finger_reports;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ef9f4913450d..d68d33fb5ac2 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1566,6 +1566,14 @@ static const struct dmi_system_id min_max_dmi_table[] __initconst = {
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
+ /* Lenovo ThinkPad Edge E431 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
+ },
+ .driver_data = (int []){1024, 5022, 2508, 4832},
+ },
+ {
/* Lenovo ThinkPad T431s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c949520bd196..57068e8035b5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
iommu_flush_dte(iommu, devid);
if (devid != alias) {
irq_lookup_table[alias] = table;
- set_dte_irq_entry(devid, table);
+ set_dte_irq_entry(alias, table);
iommu_flush_dte(iommu, alias);
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b76c58dbe30c..0e08545d7298 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
* per device. But we can enable the exclusion range per
* device. This is done here
*/
- set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+ set_dev_entry_bit(devid, DEV_ENTRY_EX);
iommu->exclusion_start = m->range_start;
iommu->exclusion_length = m->range_length;
}
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 5208828792e6..203b2e6a91cf 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work)
write = !!(fault->flags & PPR_FAULT_WRITE);
+ down_read(&fault->state->mm->mmap_sem);
npages = get_user_pages(fault->state->task, fault->state->mm,
fault->address, 1, write, 0, &page, NULL);
+ up_read(&fault->state->mm->mmap_sem);
if (npages == 1) {
put_page(page);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 784695d22fde..53b213226c01 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -19,7 +19,6 @@
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
-#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
@@ -43,6 +42,7 @@ struct convert_context {
struct bvec_iter iter_out;
sector_t cc_sector;
atomic_t cc_pending;
+ struct ablkcipher_request *req;
};
/*
@@ -111,15 +111,7 @@ struct iv_tcw_private {
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
/*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
- struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
+ * The fields in here must be read only after initialization.
*/
struct crypt_config {
struct dm_dev *dev;
@@ -150,12 +142,6 @@ struct crypt_config {
sector_t iv_offset;
unsigned int iv_size;
- /*
- * Duplicated per cpu state. Access through
- * per_cpu_ptr() only.
- */
- struct crypt_cpu __percpu *cpu;
-
/* ESSIV: struct crypto_cipher *essiv_tfm */
void *iv_private;
struct crypto_ablkcipher **tfms;
@@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);
static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
- return this_cpu_ptr(cc->cpu);
-}
-
/*
* Use this to access cipher attributes that are the same for each CPU.
*/
@@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
static void crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
- struct crypt_cpu *this_cc = this_crypt_config(cc);
unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
- if (!this_cc->req)
- this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
+ if (!ctx->req)
+ ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
- ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
- ablkcipher_request_set_callback(this_cc->req,
+ ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+ ablkcipher_request_set_callback(ctx->req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+ kcryptd_async_done, dmreq_of_req(cc, ctx->req));
}
/*
@@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc,
static int crypt_convert(struct crypt_config *cc,
struct convert_context *ctx)
{
- struct crypt_cpu *this_cc = this_crypt_config(cc);
int r;
atomic_set(&ctx->cc_pending, 1);
@@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc,
atomic_inc(&ctx->cc_pending);
- r = crypt_convert_block(cc, ctx, this_cc->req);
+ r = crypt_convert_block(cc, ctx, ctx->req);
switch (r) {
/* async */
@@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc,
reinit_completion(&ctx->restart);
/* fall through*/
case -EINPROGRESS:
- this_cc->req = NULL;
+ ctx->req = NULL;
ctx->cc_sector++;
continue;
@@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
io->sector = sector;
io->error = 0;
io->base_io = NULL;
+ io->ctx.req = NULL;
atomic_set(&io->io_pending, 0);
return io;
@@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
if (!atomic_dec_and_test(&io->io_pending))
return;
+ if (io->ctx.req)
+ mempool_free(io->ctx.req, cc->req_pool);
mempool_free(io, cc->io_pool);
if (likely(!base_io))
@@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc)
static void crypt_dtr(struct dm_target *ti)
{
struct crypt_config *cc = ti->private;
- struct crypt_cpu *cpu_cc;
- int cpu;
ti->private = NULL;
@@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->crypt_queue)
destroy_workqueue(cc->crypt_queue);
- if (cc->cpu)
- for_each_possible_cpu(cpu) {
- cpu_cc = per_cpu_ptr(cc->cpu, cpu);
- if (cpu_cc->req)
- mempool_free(cpu_cc->req, cc->req_pool);
- }
-
crypt_free_tfms(cc);
if (cc->bs)
@@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti)
if (cc->dev)
dm_put_device(ti, cc->dev);
- if (cc->cpu)
- free_percpu(cc->cpu);
-
kzfree(cc->cipher);
kzfree(cc->cipher_string);
@@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti,
if (tmp)
DMWARN("Ignoring unexpected additional cipher options");
- cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
- __alignof__(struct crypt_cpu));
- if (!cc->cpu) {
- ti->error = "Cannot allocate per cpu state";
- goto bad_mem;
- }
-
/*
* For compatibility with the original dm-crypt mapping format, if
* only the cipher name is supplied, use cbc-plain.
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aa009e865871..fa0f6cbd6a41 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1566,8 +1566,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
}
if (m->pg_init_required)
__pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
dm_table_run_md_queue_async(m->ti->table);
+ spin_unlock_irqrestore(&m->lock, flags);
}
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 13abade76ad9..2e71de8e0048 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -27,6 +27,7 @@
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
+#define NO_SPACE_TIMEOUT (HZ * 60)
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -175,6 +176,7 @@ struct pool {
struct workqueue_struct *wq;
struct work_struct worker;
struct delayed_work waker;
+ struct delayed_work no_space_timeout;
unsigned long last_commit_jiffies;
unsigned ref_count;
@@ -935,7 +937,7 @@ static int commit(struct pool *pool)
{
int r;
- if (get_pool_mode(pool) != PM_WRITE)
+ if (get_pool_mode(pool) >= PM_READ_ONLY)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
@@ -1590,6 +1592,20 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
+/*
+ * We're holding onto IO to allow userland time to react. After the
+ * timeout either the pool will have been resized (and thus back in
+ * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ */
+static void do_no_space_timeout(struct work_struct *ws)
+{
+ struct pool *pool = container_of(to_delayed_work(ws), struct pool,
+ no_space_timeout);
+
+ if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
+ set_pool_mode(pool, PM_READ_ONLY);
+}
+
/*----------------------------------------------------------------*/
struct noflush_work {
@@ -1715,6 +1731,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_discard = process_discard;
pool->process_prepared_mapping = process_prepared_mapping;
pool->process_prepared_discard = process_prepared_discard_passdown;
+
+ if (!pool->pf.error_if_no_space)
+ queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
break;
case PM_WRITE:
@@ -2100,6 +2119,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_WORK(&pool->worker, do_worker);
INIT_DELAYED_WORK(&pool->waker, do_waker);
+ INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
spin_lock_init(&pool->lock);
bio_list_init(&pool->deferred_flush_bios);
INIT_LIST_HEAD(&pool->prepared_mappings);
@@ -2662,6 +2682,7 @@ static void pool_postsuspend(struct dm_target *ti)
struct pool *pool = pt->pool;
cancel_delayed_work(&pool->waker);
+ cancel_delayed_work(&pool->no_space_timeout);
flush_workqueue(pool->wq);
(void) commit(pool);
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8fda38d23e38..237b7e0ddc7a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this,
if (mddev_trylock(mddev)) {
if (mddev->pers)
__md_stop_writes(mddev);
- mddev->safemode = 2;
+ if (mddev->persistent)
+ mddev->safemode = 2;
mddev_unlock(mddev);
}
need_delay = 1;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 33fc408e5eac..cb882aae9e20 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
int max_sectors;
int sectors;
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
sectors = bio_sectors(bio);
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_iter.bi_sector < conf->reshape_progress &&
@@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
md_write_start(mddev, bio);
- /*
- * Register the new request and wait if the reconstruction
- * thread has put up a bar for new requests.
- * Continue immediately if no resync is active currently.
- */
- wait_barrier(conf);
do {
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index e8a1ce204036..cdd7c1b7259b 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd,
* windows that fall outside that.
*/
for (i = 0; i < n_win_sizes; i++) {
- struct ov7670_win_size *win = &info->devtype->win_sizes[index];
+ struct ov7670_win_size *win = &info->devtype->win_sizes[i];
if (info->min_width && win->width < info->min_width)
continue;
if (info->min_height && win->height < info->min_height)
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index a4459301b5f8..ee0f57e01b56 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
if (ret < 0)
return -EINVAL;
- node_ep = v4l2_of_get_next_endpoint(node, NULL);
+ node_ep = of_graph_get_next_endpoint(node, NULL);
if (!node_ep) {
dev_warn(dev, "no endpoint defined for node: %s\n",
node->full_name);
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index d5a7a135f75d..703560fa5e73 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev,
struct media_entity *ent;
struct media_entity_desc u_ent;
+ memset(&u_ent, 0, sizeof(u_ent));
if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
return -EFAULT;
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index b4f12d00be05..656708252962 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq)
{
struct vpbe_fh *fh = vb2_get_drv_priv(vq);
struct vpbe_layer *layer = fh->layer;
+ struct vpbe_display *disp = fh->disp_dev;
+ unsigned long flags;
if (!vb2_is_streaming(vq))
return 0;
/* release all active buffers */
+ spin_lock_irqsave(&disp->dma_queue_lock, flags);
+ if (layer->cur_frm == layer->next_frm) {
+ vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (layer->cur_frm != NULL)
+ vb2_buffer_done(&layer->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (layer->next_frm != NULL)
+ vb2_buffer_done(&layer->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
while (!list_empty(&layer->dma_queue)) {
layer->next_frm = list_entry(layer->dma_queue.next,
struct vpbe_disp_buffer, list);
list_del(&layer->next_frm->list);
vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR);
}
-
+ spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
return 0;
}
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index d762246eabf5..0379cb9f9a9c 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -734,6 +734,8 @@ static int vpfe_release(struct file *file)
}
vpfe_dev->io_usrs = 0;
vpfe_dev->numbuffers = config_params.numbuffers;
+ videobuf_stop(&vpfe_dev->buffer_queue);
+ videobuf_mmap_free(&vpfe_dev->buffer_queue);
}
/* Decrement device usrs counter */
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 756da78bac23..8dea0b84a3ad 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
+ /* Disable channel as per its device type and channel id */
+ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
+ enable_channel0(0);
+ channel0_intr_enable(0);
+ }
+ if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+ (2 == common->started)) {
+ enable_channel1(0);
+ channel1_intr_enable(0);
+ }
+ common->started = 0;
+
/* release all active buffers */
spin_lock_irqsave(&common->irqlock, flags);
+ if (common->cur_frm == common->next_frm) {
+ vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (common->cur_frm != NULL)
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (common->next_frm != NULL)
+ vb2_buffer_done(&common->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_cap_buffer, list);
@@ -933,17 +956,6 @@ static int vpif_release(struct file *filep)
if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
/* Reset io_usrs member of channel object */
common->io_usrs = 0;
- /* Disable channel as per its device type and channel id */
- if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
- enable_channel0(0);
- channel0_intr_enable(0);
- }
- if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
- (2 == common->started)) {
- enable_channel1(0);
- channel1_intr_enable(0);
- }
- common->started = 0;
/* Free buffers allocated */
vb2_queue_release(&common->buffer_queue);
vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 0ac841e35aa4..aed41edd0501 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq)
common = &ch->common[VPIF_VIDEO_INDEX];
+ /* Disable channel */
+ if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ enable_channel2(0);
+ channel2_intr_enable(0);
+ }
+ if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
+ (2 == common->started)) {
+ enable_channel3(0);
+ channel3_intr_enable(0);
+ }
+ common->started = 0;
+
/* release all active buffers */
spin_lock_irqsave(&common->irqlock, flags);
+ if (common->cur_frm == common->next_frm) {
+ vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (common->cur_frm != NULL)
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (common->next_frm != NULL)
+ vb2_buffer_done(&common->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
while (!list_empty(&common->dma_queue)) {
common->next_frm = list_entry(common->dma_queue.next,
struct vpif_disp_buffer, list);
@@ -773,18 +796,6 @@ static int vpif_release(struct file *filep)
if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
/* Reset io_usrs member of channel object */
common->io_usrs = 0;
- /* Disable channel */
- if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
- enable_channel2(0);
- channel2_intr_enable(0);
- }
- if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
- (2 == common->started)) {
- enable_channel3(0);
- channel3_intr_enable(0);
- }
- common->started = 0;
-
/* Free buffers allocated */
vb2_queue_release(&common->buffer_queue);
vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index da2fc86cc524..25dbf5b05a96 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = {
}, {
.name = "YUV 4:2:2 planar, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV422P,
- .depth = { 12 },
+ .depth = { 16 },
.color = FIMC_FMT_YCBYCR422,
.memplanes = 1,
.colplanes = 3,
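
The corrected depth follows from the chroma subsampling arithmetic: 4:2:2 keeps one Cb and one Cr sample per horizontal pair of pixels, while the old value of 12 is the 4:2:0 figure.

        V4L2_PIX_FMT_YUV422P (4:2:2):  8 (Y) + 8/2 (Cb) + 8/2 (Cr) = 16 bits per pixel
        4:2:0 formats, for comparison: 8 (Y) + 8/4 (Cb) + 8/4 (Cr) = 12 bits per pixel
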
diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c
index 3aecaf465094..f0c9c42867de 100644
--- a/drivers/media/tuners/fc2580.c
+++ b/drivers/media/tuners/fc2580.c
@@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe)
f_ref = 2UL * priv->cfg->clock / r_val;
n_val = div_u64_rem(f_vco, f_ref, &k_val);
- k_val_reg = 1UL * k_val * (1 << 20) / f_ref;
+ k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref);
ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff));
if (ret < 0)
@@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe)
if (ret < 0)
goto err;
- ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \
- fc2580_if_filter_lut[i].mul / 1000000000);
+ ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock *
+ fc2580_if_filter_lut[i].mul, 1000000000));
if (ret < 0)
goto err;
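
On 32-bit builds, 1UL is a 32-bit type, so the original 1UL * k_val * (1 << 20) product wraps before the division, and a plain 64-bit division would need libgcc helpers the kernel does not link against; widening to 1ULL and dividing with div_u64() addresses both. A minimal standalone sketch of the truncation, with made-up k_val/f_ref numbers (not taken from the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t k_val = 50000;         /* made-up fractional PLL value */
        uint32_t f_ref = 1250000;       /* made-up reference, Hz */

        /* Force 32-bit arithmetic: the product 52428800000 is reduced
         * mod 2^32, exactly as it would be with a 32-bit unsigned long. */
        uint32_t bad  = (uint32_t)(1UL * k_val * (1 << 20)) / f_ref;

        /* Widen first, then divide -- what 1ULL plus div_u64() achieve. */
        uint32_t good = (uint32_t)((1ULL * k_val * (1 << 20)) / f_ref);

        printf("truncated: %" PRIu32 ", widened: %" PRIu32 "\n", bad, good);
        return 0;
}
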
diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h
index be38a9e637e0..646c99452136 100644
--- a/drivers/media/tuners/fc2580_priv.h
+++ b/drivers/media/tuners/fc2580_priv.h
@@ -22,6 +22,7 @@
#define FC2580_PRIV_H
#include "fc2580.h"
+#include <linux/math64.h>
struct fc2580_reg_val {
u8 reg;
diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile
index 7407b8338ccf..bc38f03394cd 100644
--- a/drivers/media/usb/dvb-usb-v2/Makefile
+++ b/drivers/media/usb/dvb-usb-v2/Makefile
@@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
ccflags-y += -I$(srctree)/drivers/media/tuners
ccflags-y += -I$(srctree)/drivers/media/common
-ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 61d196e8b3ab..dcbd392e6efc 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -24,7 +24,6 @@
#include "rtl2830.h"
#include "rtl2832.h"
-#include "rtl2832_sdr.h"
#include "qt1010.h"
#include "mt2060.h"
@@ -36,6 +35,45 @@
#include "tua9001.h"
#include "r820t.h"
+/*
+ * The RTL2832_SDR module is in staging. This logic is added in order to avoid
+ * any hard dependency on the drivers/staging/ directory, as we want to compile
+ * the mainline driver even when the whole staging directory is missing.
+ */
+#include <media/v4l2-subdev.h>
+
+#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR)
+struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+ struct v4l2_subdev *sd);
+#else
+static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe,
+ struct i2c_adapter *i2c, const struct rtl2832_config *cfg,
+ struct v4l2_subdev *sd)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_MEDIA_ATTACH
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+ void *__r = NULL; \
+ typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+ if (__a) { \
+ __r = (void *) __a(ARGS); \
+ if (__r == NULL) \
+ symbol_put(FUNCTION); \
+ } \
+ __r; \
+})
+
+#else
+#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \
+ FUNCTION(ARGS); \
+})
+
+#endif
+
static int rtl28xxu_disable_rc;
module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644);
MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller");
@@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe[0]->ops.tuner_ops.get_rf_strength;
/* attach SDR */
- dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
&rtl28xxu_rtl2832_fc0012_config, NULL);
break;
case TUNER_RTL2832_FC0013:
@@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe[0]->ops.tuner_ops.get_rf_strength;
/* attach SDR */
- dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
&rtl28xxu_rtl2832_fc0013_config, NULL);
break;
case TUNER_RTL2832_E4000: {
@@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
i2c_set_adapdata(i2c_adap_internal, d);
/* attach SDR */
- dvb_attach(rtl2832_sdr_attach, adap->fe[0],
+ dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0],
i2c_adap_internal,
&rtl28xxu_rtl2832_e4000_config, sd);
}
@@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
adap->fe[0]->ops.tuner_ops.get_rf_strength;
/* attach SDR */
- dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
+ dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap,
&rtl28xxu_rtl2832_r820t_config, NULL);
break;
case TUNER_RTL2832_R828D:
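
Rather than including the staging header, the driver now declares rtl2832_sdr_attach() locally, falls back to a static inline stub returning NULL when CONFIG_DVB_RTL2832_SDR is disabled, and routes calls through the dvb_attach_sdr() wrapper. A standalone sketch of the same compile-time optional-backend pattern; all names are hypothetical, build with -DHAVE_SDR_BACKEND to enable the "real" path:

#include <stdio.h>

#ifdef HAVE_SDR_BACKEND
int sdr_backend_attach(const char *name)
{
        printf("attached optional backend for %s\n", name);
        return 0;
}
#else
/* Stub used when the optional backend is not built: the caller simply
 * carries on without it, mirroring the NULL-returning inline above. */
static inline int sdr_backend_attach(const char *name)
{
        (void)name;
        return -1;
}
#endif

int main(void)
{
        if (sdr_backend_attach("tuner0") < 0)
                printf("optional backend missing, continuing without it\n");
        return 0;
}
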
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 7277dbd2afcd..ecbcb39feb71 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
-#if !IS_ENABLED(CONFIG_USB_SN9C102)
{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
-#endif
{USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */
{USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)},
{USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)},
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 04b2daf567be..7e2411c36419 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -178,6 +178,9 @@ struct v4l2_create_buffers32 {
static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
+ if (get_user(kp->type, &up->type))
+ return -EFAULT;
+
switch (kp->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -204,17 +207,16 @@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us
static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
{
- if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) ||
- get_user(kp->type, &up->type))
- return -EFAULT;
+ if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
+ return -EFAULT;
return __get_v4l2_format32(kp, up);
}
static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
{
if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
- copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt)))
- return -EFAULT;
+ copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
+ return -EFAULT;
return __get_v4l2_format32(&kp->format, &up->format);
}
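
The compat fix reads the type discriminant with get_user() before __get_v4l2_format32() switches on it, and bounds the bulk copy at offsetof(struct v4l2_create_buffers32, format) so the union body is only filled in by the type-specific conversion path. A standalone sketch of offsetof-bounded copying, with hypothetical structures rather than the V4L2 layouts:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical request: a fixed header followed by a union whose
 * interpretation depends on hdr.type. */
struct req32 {
        struct {
                uint32_t type;
                uint32_t flags;
        } hdr;
        union {
                uint32_t pix[8];
                uint8_t  raw[64];
        } fmt;
};

static void import_req(struct req32 *dst, const struct req32 *src)
{
        /* Copy only up to the union: everything before it is safe to take
         * verbatim, the union itself must go through the type-specific
         * converter (not shown here). */
        memcpy(dst, src, offsetof(struct req32, fmt));
}

int main(void)
{
        struct req32 in = { .hdr = { .type = 1, .flags = 0x10 } };
        struct req32 out;

        memset(&out, 0, sizeof(out));
        import_req(&out, &in);
        printf("type=%u flags=%#x\n", (unsigned)out.hdr.type,
               (unsigned)out.hdr.flags);
        return 0;
}
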
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
index 110c03627051..b59a17fb7c3e 100644
--- a/drivers/memory/mvebu-devbus.c
+++ b/drivers/memory/mvebu-devbus.c
@@ -108,8 +108,19 @@ static int devbus_set_timing_params(struct devbus *devbus,
node->full_name);
return err;
}
- /* Convert bit width to byte width */
- r.bus_width /= 8;
+
+ /*
+ * The bus width is encoded into the register as 0 for 8 bits,
+ * and 1 for 16 bits, so we do the necessary conversion here.
+ */
+ if (r.bus_width == 8)
+ r.bus_width = 0;
+ else if (r.bus_width == 16)
+ r.bus_width = 1;
+ else {
+ dev_err(devbus->dev, "invalid bus width %d\n", r.bus_width);
+ return -EINVAL;
+ }
err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
&r.badr_skew);
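
The register field is an enumeration, not a byte count, so dividing the bit width by 8 stored the wrong code (8-bit became 1, 16-bit became the invalid 2); the replacement maps the two legal widths explicitly and rejects everything else. A tiny sketch of that mapping, with a hypothetical encoder function:

#include <stdio.h>

/* Map a bus width in bits to the register encoding described above:
 * 0 for 8-bit, 1 for 16-bit, negative for anything unsupported. */
static int encode_bus_width(int bits)
{
        switch (bits) {
        case 8:  return 0;
        case 16: return 1;
        default: return -1;     /* reject instead of writing garbage */
        }
}

int main(void)
{
        int w;

        for (w = 8; w <= 32; w += 8)
                printf("%2d bits -> %d\n", w, encode_bus_width(w));
        return 0;
}
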
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index c9de3d598ea5..1d15735f9ef9 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -338,28 +338,58 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read, int timeout)
{
struct completion trans_done;
- int err = 0, count;
+ u8 dir;
+ int err = 0, i, count;
long timeleft;
unsigned long flags;
+ struct scatterlist *sg;
+ enum dma_data_direction dma_dir;
+ u32 val;
+ dma_addr_t addr;
+ unsigned int len;
+
+ dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+
+ /* don't transfer data during abort processing */
+ if (pcr->remove_pci)
+ return -EINVAL;
+
+ if ((sglist == NULL) || (num_sg <= 0))
+ return -EINVAL;
- count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
+ if (read) {
+ dir = DEVICE_TO_HOST;
+ dma_dir = DMA_FROM_DEVICE;
+ } else {
+ dir = HOST_TO_DEVICE;
+ dma_dir = DMA_TO_DEVICE;
+ }
+
+ count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
if (count < 1) {
dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
return -EINVAL;
}
dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
+ val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
+ pcr->sgi = 0;
+ for_each_sg(sglist, sg, count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
+ }
spin_lock_irqsave(&pcr->lock, flags);
pcr->done = &trans_done;
pcr->trans_result = TRANS_NOT_READY;
init_completion(&trans_done);
+ rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
+ rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
spin_unlock_irqrestore(&pcr->lock, flags);
- rtsx_pci_dma_transfer(pcr, sglist, count, read);
-
timeleft = wait_for_completion_interruptible_timeout(
&trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
@@ -383,7 +413,7 @@ out:
pcr->done = NULL;
spin_unlock_irqrestore(&pcr->lock, flags);
- rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
+ dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
if ((err < 0) && (err != -ENODEV))
rtsx_pci_stop_cmd(pcr);
@@ -395,73 +425,6 @@ out:
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read)
-{
- enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (pcr->remove_pci)
- return -EINVAL;
-
- if ((sglist == NULL) || num_sg < 1)
- return -EINVAL;
-
- return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
-
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read)
-{
- enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
- if (pcr->remove_pci)
- return -EINVAL;
-
- if (sglist == NULL || num_sg < 1)
- return -EINVAL;
-
- dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
- return num_sg;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
-
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int sg_count, bool read)
-{
- struct scatterlist *sg;
- dma_addr_t addr;
- unsigned int len;
- int i;
- u32 val;
- u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
- unsigned long flags;
-
- if (pcr->remove_pci)
- return -EINVAL;
-
- if ((sglist == NULL) || (sg_count < 1))
- return -EINVAL;
-
- val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
- pcr->sgi = 0;
- for_each_sg(sglist, sg, sg_count, i) {
- addr = sg_dma_address(sg);
- len = sg_dma_len(sg);
- rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1);
- }
-
- spin_lock_irqsave(&pcr->lock, flags);
-
- rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
- rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
-
- spin_unlock_irqrestore(&pcr->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
-
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
int err;
@@ -873,8 +836,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
/* Clear interrupt flag */
rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
- dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg);
-
if ((int_reg & pcr->bier) == 0) {
spin_unlock(&pcr->lock);
return IRQ_NONE;
@@ -905,28 +866,17 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
}
if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
- if (int_reg & (TRANS_FAIL_INT | DELINK_INT))
+ if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
pcr->trans_result = TRANS_RESULT_FAIL;
- else if (int_reg & TRANS_OK_INT)
+ if (pcr->done)
+ complete(pcr->done);
+ } else if (int_reg & TRANS_OK_INT) {
pcr->trans_result = TRANS_RESULT_OK;
-
- if (pcr->done)
- complete(pcr->done);
-
- if (int_reg & SD_EXIST) {
- struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
- if (slot && slot->done_transfer)
- slot->done_transfer(slot->p_dev);
- }
-
- if (int_reg & MS_EXIST) {
- struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD];
- if (slot && slot->done_transfer)
- slot->done_transfer(slot->p_dev);
+ if (pcr->done)
+ complete(pcr->done);
}
}
-
if (pcr->card_inserted || pcr->card_removed)
schedule_delayed_work(&pcr->carddet_work,
msecs_to_jiffies(200));
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 5fb994f9a653..0b9ded13a3ae 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -31,28 +31,14 @@
#include <linux/mfd/rtsx_pci.h>
#include <asm/unaligned.h>
-struct realtek_next {
- unsigned int sg_count;
- s32 cookie;
-};
-
struct realtek_pci_sdmmc {
struct platform_device *pdev;
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
struct mmc_request *mrq;
- struct mmc_command *cmd;
- struct mmc_data *data;
-
- spinlock_t lock;
- struct timer_list timer;
- struct tasklet_struct cmd_tasklet;
- struct tasklet_struct data_tasklet;
- struct tasklet_struct finish_tasklet;
-
- u8 rsp_type;
- u8 rsp_len;
- int sg_count;
+
+ struct mutex host_mutex;
+
u8 ssc_depth;
unsigned int clock;
bool vpclk;
@@ -62,13 +48,8 @@ struct realtek_pci_sdmmc {
int power_state;
#define SDMMC_POWER_ON 1
#define SDMMC_POWER_OFF 0
-
- struct realtek_next next_data;
};
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
- struct mmc_request *mrq);
-
static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
{
return &(host->pdev->dev);
@@ -105,95 +86,6 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
#define sd_print_debug_regs(host)
#endif /* DEBUG */
-static void sd_isr_done_transfer(struct platform_device *pdev)
-{
- struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
-
- spin_lock(&host->lock);
- if (host->cmd)
- tasklet_schedule(&host->cmd_tasklet);
- if (host->data)
- tasklet_schedule(&host->data_tasklet);
- spin_unlock(&host->lock);
-}
-
-static void sd_request_timeout(unsigned long host_addr)
-{
- struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- if (!host->mrq) {
- dev_err(sdmmc_dev(host), "error: no request exist\n");
- goto out;
- }
-
- if (host->cmd)
- host->cmd->error = -ETIMEDOUT;
- if (host->data)
- host->data->error = -ETIMEDOUT;
-
- dev_dbg(sdmmc_dev(host), "timeout for request\n");
-
-out:
- tasklet_schedule(&host->finish_tasklet);
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static void sd_finish_request(unsigned long host_addr)
-{
- struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
- struct rtsx_pcr *pcr = host->pcr;
- struct mmc_request *mrq;
- struct mmc_command *cmd;
- struct mmc_data *data;
- unsigned long flags;
- bool any_error;
-
- spin_lock_irqsave(&host->lock, flags);
-
- del_timer(&host->timer);
- mrq = host->mrq;
- if (!mrq) {
- dev_err(sdmmc_dev(host), "error: no request need finish\n");
- goto out;
- }
-
- cmd = mrq->cmd;
- data = mrq->data;
-
- any_error = (mrq->sbc && mrq->sbc->error) ||
- (mrq->stop && mrq->stop->error) ||
- (cmd && cmd->error) || (data && data->error);
-
- if (any_error) {
- rtsx_pci_stop_cmd(pcr);
- sd_clear_error(host);
- }
-
- if (data) {
- if (any_error)
- data->bytes_xfered = 0;
- else
- data->bytes_xfered = data->blocks * data->blksz;
-
- if (!data->host_cookie)
- rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len,
- data->flags & MMC_DATA_READ);
-
- }
-
- host->mrq = NULL;
- host->cmd = NULL;
- host->data = NULL;
-
-out:
- spin_unlock_irqrestore(&host->lock, flags);
- mutex_unlock(&pcr->pcr_mutex);
- mmc_request_done(host->mmc, mrq);
-}
-
static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
u8 *buf, int buf_len, int timeout)
{
@@ -311,7 +203,8 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
return 0;
}
-static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
+static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
+ struct mmc_command *cmd)
{
struct rtsx_pcr *pcr = host->pcr;
u8 cmd_idx = (u8)cmd->opcode;
@@ -319,14 +212,11 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
int err = 0;
int timeout = 100;
int i;
+ u8 *ptr;
+ int stat_idx = 0;
u8 rsp_type;
int rsp_len = 5;
- unsigned long flags;
-
- if (host->cmd)
- dev_err(sdmmc_dev(host), "error: cmd already exist\n");
-
- host->cmd = cmd;
+ bool clock_toggled = false;
dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n",
__func__, cmd_idx, arg);
@@ -361,8 +251,6 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
err = -EINVAL;
goto out;
}
- host->rsp_type = rsp_type;
- host->rsp_len = rsp_len;
if (rsp_type == SD_RSP_TYPE_R1b)
timeout = 3000;
@@ -372,6 +260,8 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
0xFF, SD_CLK_TOGGLE_EN);
if (err < 0)
goto out;
+
+ clock_toggled = true;
}
rtsx_pci_init_cmd(pcr);
@@ -395,60 +285,25 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd)
/* Read data from ping-pong buffer */
for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+ stat_idx = 16;
} else if (rsp_type != SD_RSP_TYPE_R0) {
/* Read data from SD_CMDx registers */
for (i = SD_CMD0; i <= SD_CMD4; i++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0);
+ stat_idx = 5;
}
rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0);
- mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));
-
- spin_lock_irqsave(&pcr->lock, flags);
- pcr->trans_result = TRANS_NOT_READY;
- rtsx_pci_send_cmd_no_wait(pcr);
- spin_unlock_irqrestore(&pcr->lock, flags);
-
- return;
-
-out:
- cmd->error = err;
- tasklet_schedule(&host->finish_tasklet);
-}
-
-static void sd_get_rsp(unsigned long host_addr)
-{
- struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
- struct rtsx_pcr *pcr = host->pcr;
- struct mmc_command *cmd;
- int i, err = 0, stat_idx;
- u8 *ptr, rsp_type;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- cmd = host->cmd;
- host->cmd = NULL;
-
- if (!cmd) {
- dev_err(sdmmc_dev(host), "error: cmd not exist\n");
+ err = rtsx_pci_send_cmd(pcr, timeout);
+ if (err < 0) {
+ sd_print_debug_regs(host);
+ sd_clear_error(host);
+ dev_dbg(sdmmc_dev(host),
+ "rtsx_pci_send_cmd error (err = %d)\n", err);
goto out;
}
- spin_lock(&pcr->lock);
- if (pcr->trans_result == TRANS_NO_DEVICE)
- err = -ENODEV;
- else if (pcr->trans_result != TRANS_RESULT_OK)
- err = -EINVAL;
- spin_unlock(&pcr->lock);
-
- if (err < 0)
- goto out;
-
- rsp_type = host->rsp_type;
- stat_idx = host->rsp_len;
-
if (rsp_type == SD_RSP_TYPE_R0) {
err = 0;
goto out;
@@ -485,106 +340,26 @@ static void sd_get_rsp(unsigned long host_addr)
cmd->resp[0]);
}
- if (cmd == host->mrq->sbc) {
- sd_send_cmd(host, host->mrq->cmd);
- spin_unlock_irqrestore(&host->lock, flags);
- return;
- }
-
- if (cmd == host->mrq->stop)
- goto out;
-
- if (cmd->data) {
- sd_start_multi_rw(host, host->mrq);
- spin_unlock_irqrestore(&host->lock, flags);
- return;
- }
-
out:
cmd->error = err;
- tasklet_schedule(&host->finish_tasklet);
- spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
- struct mmc_data *data, struct realtek_next *next)
-{
- struct rtsx_pcr *pcr = host->pcr;
- int read = data->flags & MMC_DATA_READ;
- int sg_count = 0;
-
- if (!next && data->host_cookie &&
- data->host_cookie != host->next_data.cookie) {
- dev_err(sdmmc_dev(host),
- "error: invalid cookie data[%d] host[%d]\n",
- data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
- }
-
- if (next || (!next && data->host_cookie != host->next_data.cookie))
- sg_count = rtsx_pci_dma_map_sg(pcr,
- data->sg, data->sg_len, read);
- else
- sg_count = host->next_data.sg_count;
-
- if (next) {
- next->sg_count = sg_count;
- if (++next->cookie < 0)
- next->cookie = 1;
- data->host_cookie = next->cookie;
- }
-
- return sg_count;
-}
-
-static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
- bool is_first_req)
-{
- struct realtek_pci_sdmmc *host = mmc_priv(mmc);
- struct mmc_data *data = mrq->data;
-
- if (data->host_cookie) {
- dev_err(sdmmc_dev(host),
- "error: descard already cookie data[%d]\n",
- data->host_cookie);
- data->host_cookie = 0;
- }
-
- dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n",
- sd_pre_dma_transfer(host, data, &host->next_data));
-}
-
-static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
- int err)
-{
- struct realtek_pci_sdmmc *host = mmc_priv(mmc);
- struct rtsx_pcr *pcr = host->pcr;
- struct mmc_data *data = mrq->data;
- int read = data->flags & MMC_DATA_READ;
-
- rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
- data->host_cookie = 0;
+ if (err && clock_toggled)
+ rtsx_pci_write_register(pcr, SD_BUS_STAT,
+ SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0);
}
-static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
- struct mmc_request *mrq)
+static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
{
struct rtsx_pcr *pcr = host->pcr;
struct mmc_host *mmc = host->mmc;
struct mmc_card *card = mmc->card;
struct mmc_data *data = mrq->data;
int uhs = mmc_card_uhs(card);
- int read = data->flags & MMC_DATA_READ;
+ int read = (data->flags & MMC_DATA_READ) ? 1 : 0;
u8 cfg2, trans_mode;
int err;
size_t data_len = data->blksz * data->blocks;
- if (host->data)
- dev_err(sdmmc_dev(host), "error: data already exist\n");
-
- host->data = data;
-
if (read) {
cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 |
SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0;
@@ -635,54 +410,15 @@ static int sd_start_multi_rw(struct realtek_pci_sdmmc *host,
rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER,
SD_TRANSFER_END, SD_TRANSFER_END);
- mod_timer(&host->timer, jiffies + 10 * HZ);
rtsx_pci_send_cmd_no_wait(pcr);
- err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read);
- if (err < 0) {
- data->error = err;
- tasklet_schedule(&host->finish_tasklet);
- }
- return 0;
-}
-
-static void sd_finish_multi_rw(unsigned long host_addr)
-{
- struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr;
- struct rtsx_pcr *pcr = host->pcr;
- struct mmc_data *data;
- int err = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- if (!host->data) {
- dev_err(sdmmc_dev(host), "error: no data exist\n");
- goto out;
- }
-
- data = host->data;
- host->data = NULL;
-
- if (pcr->trans_result == TRANS_NO_DEVICE)
- err = -ENODEV;
- else if (pcr->trans_result != TRANS_RESULT_OK)
- err = -EINVAL;
-
+ err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
if (err < 0) {
- data->error = err;
- goto out;
- }
-
- if (!host->mrq->sbc && data->stop) {
- sd_send_cmd(host, data->stop);
- spin_unlock_irqrestore(&host->lock, flags);
- return;
+ sd_clear_error(host);
+ return err;
}
-out:
- tasklet_schedule(&host->finish_tasklet);
- spin_unlock_irqrestore(&host->lock, flags);
+ return 0;
}
static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host)
@@ -901,13 +637,6 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
return 0;
}
-static inline bool sd_use_muti_rw(struct mmc_command *cmd)
-{
- return mmc_op_multi(cmd->opcode) ||
- (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
- (cmd->opcode == MMC_WRITE_BLOCK);
-}
-
static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct realtek_pci_sdmmc *host = mmc_priv(mmc);
@@ -916,14 +645,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_data *data = mrq->data;
unsigned int data_size = 0;
int err;
- unsigned long flags;
-
- mutex_lock(&pcr->pcr_mutex);
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->mrq)
- dev_err(sdmmc_dev(host), "error: request already exist\n");
- host->mrq = mrq;
if (host->eject) {
cmd->error = -ENOMEDIUM;
@@ -936,6 +657,8 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
goto finish;
}
+ mutex_lock(&pcr->pcr_mutex);
+
rtsx_pci_start_run(pcr);
rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth,
@@ -944,28 +667,46 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
rtsx_pci_write_register(pcr, CARD_SHARE_MODE,
CARD_SHARE_MASK, CARD_SHARE_48_SD);
+ mutex_lock(&host->host_mutex);
+ host->mrq = mrq;
+ mutex_unlock(&host->host_mutex);
+
if (mrq->data)
data_size = data->blocks * data->blksz;
- if (sd_use_muti_rw(cmd))
- host->sg_count = sd_pre_dma_transfer(host, data, NULL);
+ if (!data_size || mmc_op_multi(cmd->opcode) ||
+ (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+ (cmd->opcode == MMC_WRITE_BLOCK)) {
+ sd_send_cmd_get_rsp(host, cmd);
- if (!data_size || sd_use_muti_rw(cmd)) {
- if (mrq->sbc)
- sd_send_cmd(host, mrq->sbc);
- else
- sd_send_cmd(host, cmd);
- spin_unlock_irqrestore(&host->lock, flags);
+ if (!cmd->error && data_size) {
+ sd_rw_multi(host, mrq);
+
+ if (mmc_op_multi(cmd->opcode) && mrq->stop)
+ sd_send_cmd_get_rsp(host, mrq->stop);
+ }
} else {
- spin_unlock_irqrestore(&host->lock, flags);
sd_normal_rw(host, mrq);
- tasklet_schedule(&host->finish_tasklet);
}
- return;
+
+ if (mrq->data) {
+ if (cmd->error || data->error)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = data->blocks * data->blksz;
+ }
+
+ mutex_unlock(&pcr->pcr_mutex);
finish:
- tasklet_schedule(&host->finish_tasklet);
- spin_unlock_irqrestore(&host->lock, flags);
+ if (cmd->error)
+ dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error);
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = NULL;
+ mutex_unlock(&host->host_mutex);
+
+ mmc_request_done(mmc, mrq);
}
static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
@@ -1400,8 +1141,6 @@ out:
}
static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
- .pre_req = sdmmc_pre_req,
- .post_req = sdmmc_post_req,
.request = sdmmc_request,
.set_ios = sdmmc_set_ios,
.get_ro = sdmmc_get_ro,
@@ -1465,7 +1204,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
struct realtek_pci_sdmmc *host;
struct rtsx_pcr *pcr;
struct pcr_handle *handle = pdev->dev.platform_data;
- unsigned long host_addr;
if (!handle)
return -ENXIO;
@@ -1489,15 +1227,8 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
- host_addr = (unsigned long)host;
- host->next_data.cookie = 1;
- setup_timer(&host->timer, sd_request_timeout, host_addr);
- tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr);
- tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr);
- tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr);
- spin_lock_init(&host->lock);
+ mutex_init(&host->host_mutex);
- pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer;
realtek_init_host(host);
mmc_add_host(mmc);
@@ -1510,8 +1241,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev);
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
- struct mmc_request *mrq;
- unsigned long flags;
if (!host)
return 0;
@@ -1519,33 +1248,22 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
pcr = host->pcr;
pcr->slots[RTSX_SD_CARD].p_dev = NULL;
pcr->slots[RTSX_SD_CARD].card_event = NULL;
- pcr->slots[RTSX_SD_CARD].done_transfer = NULL;
mmc = host->mmc;
- mrq = host->mrq;
- spin_lock_irqsave(&host->lock, flags);
+ mutex_lock(&host->host_mutex);
if (host->mrq) {
dev_dbg(&(pdev->dev),
"%s: Controller removed during transfer\n",
mmc_hostname(mmc));
- if (mrq->sbc)
- mrq->sbc->error = -ENOMEDIUM;
- if (mrq->cmd)
- mrq->cmd->error = -ENOMEDIUM;
- if (mrq->stop)
- mrq->stop->error = -ENOMEDIUM;
- if (mrq->data)
- mrq->data->error = -ENOMEDIUM;
+ rtsx_pci_complete_unfinished_transfer(pcr);
- tasklet_schedule(&host->finish_tasklet);
+ host->mrq->cmd->error = -ENOMEDIUM;
+ if (host->mrq->stop)
+ host->mrq->stop->error = -ENOMEDIUM;
+ mmc_request_done(mmc, host->mrq);
}
- spin_unlock_irqrestore(&host->lock, flags);
-
- del_timer_sync(&host->timer);
- tasklet_kill(&host->cmd_tasklet);
- tasklet_kill(&host->data_tasklet);
- tasklet_kill(&host->finish_tasklet);
+ mutex_unlock(&host->host_mutex);
mmc_remove_host(mmc);
host->eject = true;
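
The sdmmc rewrite above drops the timer/tasklet state machine and runs each request synchronously, keeping host->mrq set only while a transfer is in flight and guarding it with host_mutex so the remove path can tell whether anything is still pending. A minimal userspace sketch of that guard, using pthreads and hypothetical names rather than the kernel mutex and MMC APIs:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical host: mrq is only read or written with host_mutex held,
 * which is the invariant the host->mrq handling above relies on. */
struct host {
        pthread_mutex_t host_mutex;
        const char *mrq;        /* stand-in for struct mmc_request * */
};

static void submit_request(struct host *h, const char *req)
{
        pthread_mutex_lock(&h->host_mutex);
        h->mrq = req;
        pthread_mutex_unlock(&h->host_mutex);

        /* ... synchronous command and data phases would run here ... */

        pthread_mutex_lock(&h->host_mutex);
        h->mrq = NULL;          /* request finished, nothing in flight */
        pthread_mutex_unlock(&h->host_mutex);
}

static void remove_host(struct host *h)
{
        pthread_mutex_lock(&h->host_mutex);
        if (h->mrq)
                printf("controller removed during transfer of %s\n", h->mrq);
        pthread_mutex_unlock(&h->host_mutex);
}

int main(void)
{
        struct host h = { PTHREAD_MUTEX_INITIALIZER, NULL };

        submit_request(&h, "CMD17");
        remove_host(&h);
        return 0;
}
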
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 4615d79fc93f..b922c8efcf40 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 = {
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
+ {.compatible = "ti,keystone-nand", },
{},
};
MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
@@ -581,6 +582,11 @@ static struct davinci_nand_pdata
of_property_read_bool(pdev->dev.of_node,
"ti,davinci-nand-use-bbt"))
pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "ti,keystone-nand")) {
+ pdata->options |= NAND_NO_SUBPAGE_WRITE;
+ }
}
return dev_get_platdata(&pdev->dev);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 03e0bcade234..7bbbf1ca0887 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
}
/* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
@@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
bond->alb_info.rlb_promisc_timeout_counter = 0;
- alb_send_learning_packets(bond->curr_active_slave, addr);
+ alb_send_learning_packets(bond->curr_active_slave, addr, true);
}
/* slave being removed should not be active at this point
@@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
- u16 vid)
+ __be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
struct sk_buff *skb;
@@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->dev = slave->dev;
if (vid) {
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
+ skb = vlan_put_tag(skb, vlan_proto, vid);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
slave->bond->dev->name);
@@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
dev_queue_xmit(skb);
}
-
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+ bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
struct net_device *upper;
struct list_head *iter;
/* send untagged */
- alb_send_lp_vid(slave, mac_addr, 0);
+ alb_send_lp_vid(slave, mac_addr, 0, 0);
/* loop through vlans and send one packet for each */
rcu_read_lock();
netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (upper->priv_flags & IFF_802_1Q_VLAN)
- alb_send_lp_vid(slave, mac_addr,
- vlan_dev_vlan_id(upper));
+ if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
+ if (strict_match &&
+ ether_addr_equal_64bits(mac_addr,
+ upper->dev_addr)) {
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ } else if (!strict_match) {
+ alb_send_lp_vid(slave, upper->dev_addr,
+ vlan_dev_vlan_proto(upper),
+ vlan_dev_vlan_id(upper));
+ }
+ }
}
rcu_read_unlock();
}
@@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
/* fasten the change in the switch */
if (bond_slave_can_tx(slave1)) {
- alb_send_learning_packets(slave1, slave1->dev->dev_addr);
+ alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
@@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
}
if (bond_slave_can_tx(slave2)) {
- alb_send_learning_packets(slave2, slave2->dev->dev_addr);
+ alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform the clients that the mac address
* has changed
@@ -1536,6 +1547,8 @@ void bond_alb_monitor(struct work_struct *work)
/* send learning packets */
if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
+ bool strict_match;
+
/* change of curr_active_slave involves swapping of mac addresses.
* in order to avoid this swapping from happening while
* sending the learning packets, the curr_slave_lock must be held for
@@ -1543,8 +1556,15 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave_rcu(bond, slave, iter)
- alb_send_learning_packets(slave, slave->dev->dev_addr);
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ /* If updating curr_active_slave, use all currently
+ * used MAC addresses (!strict_match). Otherwise, only
+ * use the MAC of the slave device.
+ */
+ strict_match = (slave != bond->curr_active_slave);
+ alb_send_learning_packets(slave, slave->dev->dev_addr,
+ strict_match);
+ }
read_unlock(&bond->curr_slave_lock);
@@ -1767,7 +1787,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
} else {
/* set the new_slave to the bond mac address */
alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
- alb_send_learning_packets(new_slave, bond->dev->dev_addr);
+ alb_send_learning_packets(new_slave, bond->dev->dev_addr,
+ false);
}
write_lock_bh(&bond->curr_slave_lock);
@@ -1810,7 +1831,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
read_lock(&bond->lock);
- alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
+ alb_send_learning_packets(bond->curr_active_slave,
+ bond_dev->dev_addr, false);
if (bond->alb_info.rlb_enabled) {
/* inform clients mac address has changed */
rlb_req_update_slave_clients(bond, bond->curr_active_slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 499645b0925c..59a12c61ceb4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2157,10 +2157,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
*/
static void bond_arp_send(struct net_device *slave_dev, int arp_op,
__be32 dest_ip, __be32 src_ip,
- struct bond_vlan_tag *inner,
- struct bond_vlan_tag *outer)
+ struct bond_vlan_tag *tags)
{
struct sk_buff *skb;
+ int i;
pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
arp_op, slave_dev->name, &dest_ip, &src_ip);
@@ -2172,21 +2172,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
net_err_ratelimited("ARP packet allocation failed\n");
return;
}
- if (outer->vlan_id) {
- if (inner->vlan_id) {
- pr_debug("inner tag: proto %X vid %X\n",
- ntohs(inner->vlan_proto), inner->vlan_id);
- skb = __vlan_put_tag(skb, inner->vlan_proto,
- inner->vlan_id);
- if (!skb) {
- net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
- }
- }
- pr_debug("outer reg: proto %X vid %X\n",
- ntohs(outer->vlan_proto), outer->vlan_id);
- skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
+ /* Go through all the tags backwards and add them to the packet */
+ for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
+ if (!tags[i].vlan_id)
+ continue;
+
+ pr_debug("inner tag: proto %X vid %X\n",
+ ntohs(tags[i].vlan_proto), tags[i].vlan_id);
+ skb = __vlan_put_tag(skb, tags[i].vlan_proto,
+ tags[i].vlan_id);
+ if (!skb) {
+ net_err_ratelimited("failed to insert inner VLAN tag\n");
+ return;
+ }
+ }
+ /* Set the outer tag */
+ if (tags[0].vlan_id) {
+ pr_debug("outer tag: proto %X vid %X\n",
+ ntohs(tags[0].vlan_proto), tags[0].vlan_id);
+ skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert outer VLAN tag\n");
return;
@@ -2195,22 +2200,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
arp_xmit(skb);
}
+/* Validate the device path between the @start_dev and the @end_dev.
+ * The path is valid if the @end_dev is reachable through device
+ * stacking.
+ * When the path is validated, collect any vlan information in the
+ * path.
+ */
+static bool bond_verify_device_path(struct net_device *start_dev,
+ struct net_device *end_dev,
+ struct bond_vlan_tag *tags)
+{
+ struct net_device *upper;
+ struct list_head *iter;
+ int idx;
+
+ if (start_dev == end_dev)
+ return true;
+
+ netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
+ if (bond_verify_device_path(upper, end_dev, tags)) {
+ if (is_vlan_dev(upper)) {
+ idx = vlan_get_encap_level(upper);
+ if (idx >= BOND_MAX_VLAN_ENCAP)
+ return false;
+
+ tags[idx].vlan_proto =
+ vlan_dev_vlan_proto(upper);
+ tags[idx].vlan_id = vlan_dev_vlan_id(upper);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
- struct net_device *upper, *vlan_upper;
- struct list_head *iter, *vlan_iter;
struct rtable *rt;
- struct bond_vlan_tag inner, outer;
+ struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
__be32 *targets = bond->params.arp_targets, addr;
int i;
+ bool ret;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
- inner.vlan_proto = 0;
- inner.vlan_id = 0;
- outer.vlan_proto = 0;
- outer.vlan_id = 0;
+ memset(tags, 0, sizeof(tags));
/* Find out through which dev should the packet go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2223,7 +2258,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
bond->dev->name,
&targets[i]);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ 0, tags);
continue;
}
@@ -2232,52 +2268,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
goto found;
rcu_read_lock();
- /* first we search only for vlan devices. for every vlan
- * found we verify its upper dev list, searching for the
- * rt->dst.dev. If found we save the tag of the vlan and
- * proceed to send the packet.
- */
- netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
- vlan_iter) {
- if (!is_vlan_dev(vlan_upper))
- continue;
-
- if (vlan_upper == rt->dst.dev) {
- outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
- outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
- rcu_read_unlock();
- goto found;
- }
- netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
- iter) {
- if (upper == rt->dst.dev) {
- /* If the upper dev is a vlan dev too,
- * set the vlan tag to inner tag.
- */
- if (is_vlan_dev(upper)) {
- inner.vlan_proto = vlan_dev_vlan_proto(upper);
- inner.vlan_id = vlan_dev_vlan_id(upper);
- }
- outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
- outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
- rcu_read_unlock();
- goto found;
- }
- }
- }
-
- /* if the device we're looking for is not on top of any of
- * our upper vlans, then just search for any dev that
- * matches, and in case it's a vlan - save the id
- */
- netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
- if (upper == rt->dst.dev) {
- rcu_read_unlock();
- goto found;
- }
- }
+ ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
rcu_read_unlock();
+ if (ret)
+ goto found;
+
/* Not our device - skip */
pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
bond->dev->name, &targets[i],
@@ -2290,7 +2286,7 @@ found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, &inner, &outer);
+ addr, tags);
}
}
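
bond_verify_device_path() above is a depth-first walk of the upper-device graph: once the target device is reached, each VLAN device on the unwind records its tag in the slot given by its encapsulation level, and bond_arp_send() then pushes the inner tags first and tags[0] as the outer tag. A standalone sketch of that walk over a hand-built three-device stack; the structures and the encap-level convention (0 = outermost) are simplified assumptions, not the netdev API:

#include <stdbool.h>
#include <stdio.h>

#define MAX_VLAN_ENCAP 2

/* Hypothetical device node: each device knows its uppers; VLAN devices
 * additionally carry an id and an encapsulation level. */
struct dev {
        const char *name;
        bool is_vlan;
        int vlan_id;
        int encap_level;
        struct dev *upper[4];
        int n_upper;
};

struct tag { int vlan_id; };

/* Depth-first search from start to end; on the way back up, record the
 * tag of every VLAN device in the slot given by its encap level. */
static bool verify_path(struct dev *start, struct dev *end,
                        struct tag tags[MAX_VLAN_ENCAP])
{
        int i;

        if (start == end)
                return true;

        for (i = 0; i < start->n_upper; i++) {
                struct dev *up = start->upper[i];

                if (verify_path(up, end, tags)) {
                        if (up->is_vlan && up->encap_level < MAX_VLAN_ENCAP)
                                tags[up->encap_level].vlan_id = up->vlan_id;
                        return true;
                }
        }
        return false;
}

int main(void)
{
        /* bond0 <- vlan10 (outer tag) <- vlan20 (inner tag, Q-in-Q) */
        struct dev vlan20 = { "vlan20", true, 20, 1, { 0 }, 0 };
        struct dev vlan10 = { "vlan10", true, 10, 0, { &vlan20 }, 1 };
        struct dev bond0  = { "bond0", false, 0, 0, { &vlan10 }, 1 };
        struct tag tags[MAX_VLAN_ENCAP] = { { 0 }, { 0 } };

        if (verify_path(&bond0, &vlan20, tags))
                printf("outer vid %d, inner vid %d\n",
                       tags[0].vlan_id, tags[1].vlan_id);
        return 0;
}
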
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 94094b3d5a3e..540e0167bf24 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -127,6 +127,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
static const struct bond_opt_value bond_intmax_tbl[] = {
{ "off", 0, BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
};
static const struct bond_opt_value bond_lacp_rate_tbl[] = {
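
The one-line bond_options change adds the missing { NULL, -1, 0} terminator to bond_intmax_tbl; the option code walks these tables until it finds a NULL string, so a table without the sentinel is walked past its end. A small sketch of sentinel-terminated lookup, with hypothetical names:

#include <stdio.h>
#include <string.h>

struct opt_value {
        const char *string;     /* NULL marks the end of the table */
        long long value;
};

/* Walk the table until the NULL sentinel; without it this loop would
 * read past the array, which is what the missing terminator allowed. */
static const struct opt_value *opt_find(const struct opt_value *tbl,
                                        const char *s)
{
        for (; tbl->string; tbl++)
                if (strcmp(tbl->string, s) == 0)
                        return tbl;
        return NULL;
}

static const struct opt_value intmax_tbl[] = {
        { "off",    0 },
        { "maxval", 2147483647LL },
        { NULL,     -1 },       /* the terminator the patch adds */
};

int main(void)
{
        const struct opt_value *v = opt_find(intmax_tbl, "maxval");

        printf("%s -> %lld\n", v ? v->string : "?", v ? v->value : -1);
        return 0;
}
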
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index dfc37797df41..ea64aa2f8b95 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -36,6 +36,7 @@
#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+#define BOND_MAX_VLAN_ENCAP 2
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 8ab7103d4f44..61ffc12d8fd8 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM
SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
boards like am335x, dm814x, dm813x and dm811x.
-config CAN_C_CAN_STRICT_FRAME_ORDERING
- bool "Force a strict RX CAN frame order (may cause frame loss)"
- ---help---
- The RX split buffer prevents packet reordering but can cause packet
- loss. Only enable this option when you accept to lose CAN frames
- in favour of getting the received CAN frames in the correct order.
-
config CAN_C_CAN_PCI
tristate "Generic PCI Bus based C_CAN/D_CAN driver"
depends on PCI
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e154b4cb0f1a..8e78bb48f5a4 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -727,26 +727,12 @@ static u32 c_can_adjust_pending(u32 pend)
static inline void c_can_rx_object_get(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- if (obj < C_CAN_MSG_RX_LOW_LAST)
- c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
- else
-#endif
c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
}
static inline void c_can_rx_finalize(struct net_device *dev,
struct c_can_priv *priv, u32 obj)
{
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- if (obj < C_CAN_MSG_RX_LOW_LAST)
- priv->rxmasked |= BIT(obj - 1);
- else if (obj == C_CAN_MSG_RX_LOW_LAST) {
- priv->rxmasked = 0;
- /* activate all lower message objects */
- c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
- }
-#endif
if (priv->type != BOSCH_D_CAN)
c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
}
@@ -794,9 +780,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
{
u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
-#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
- pend &= ~priv->rxmasked;
-#endif
return pend;
}
@@ -809,25 +792,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv)
* has arrived. To work-around this issue, we keep two groups of message
* objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
*
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
- *
- * To ensure in-order frame reception we use the following
- * approach while re-activating a message object to receive further
- * frames:
- * - if the current message object number is lower than
- * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
- * the INTPND bit.
- * - if the current message object number is equal to
- * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
- * receive message objects.
- * - if the current message object number is greater than
- * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
- * only this message object.
- *
- * This can cause packet loss!
- *
- * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
- *
* We clear the newdat bit right away.
*
* This can result in packet reordering when the readout is slow.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index c540e3d12e3d..564933ae218c 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev;
+ struct net_device *dev, *prev_dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
@@ -688,11 +688,13 @@ failure_remove_channels:
writew(0x0, cfg_base + PITA_ICR + 2);
chan = NULL;
- for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
+ for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
priv = netdev_priv(dev);
chan = priv->priv;
+ prev_dev = chan->prev_dev;
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
}
/* free any PCIeC resources too */
@@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
/* Loop over all registered devices */
while (1) {
+ struct net_device *prev_dev = chan->prev_dev;
+
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->prev_dev;
+ dev = prev_dev;
if (!dev) {
/* do that only for first channel */
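
Both the probe error path and peak_pci_remove() previously read chan->prev_dev after unregister_sja1000dev()/free_sja1000dev() had released the device that chan lives in; caching the link before freeing removes the use-after-free. A standalone sketch of the same save-the-link-first teardown on a singly linked chain:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *prev;      /* link to the previously registered node */
};

/* Tear down a chain: read the link *before* freeing the node it lives
 * in, which is exactly the reordering the peak_pci fix makes. */
static void destroy_chain(struct node *n)
{
        while (n) {
                struct node *prev = n->prev;    /* save the link first */

                printf("removing node %d\n", n->id);
                free(n);                        /* n->prev is now invalid */
                n = prev;
        }
}

int main(void)
{
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->id = i;
                n->prev = head;
                head = n;
        }
        destroy_chain(head);
        return 0;
}
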
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
index d4a187e45369..3eff2fd3997e 100644
--- a/drivers/net/ethernet/altera/Makefile
+++ b/drivers/net/ethernet/altera/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
altera_msgdma.o altera_sgdma.o altera_utils.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 38c500f95b9e..0fb986ba3290 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -37,16 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv)
void msgdma_reset(struct altera_tse_private *priv)
{
int counter;
- struct msgdma_csr *txcsr = priv->tx_dma_csr;
- struct msgdma_csr *rxcsr = priv->rx_dma_csr;
/* Reset Rx mSGDMA */
- iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
- iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr,
+ msgdma_csroffs(status));
+ csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr,
+ msgdma_csroffs(control));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(&rxcsr->status,
+ if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status),
MSGDMA_CSR_STAT_RESETTING))
break;
udelay(1);
@@ -57,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv)
"TSE Rx mSGDMA resetting bit never cleared!\n");
/* clear all status bits */
- iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status));
/* Reset Tx mSGDMA */
- iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
- iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr,
+ msgdma_csroffs(status));
+
+ csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr,
+ msgdma_csroffs(control));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(&txcsr->status,
+ if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status),
MSGDMA_CSR_STAT_RESETTING))
break;
udelay(1);
@@ -76,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv)
"TSE Tx mSGDMA resetting bit never cleared!\n");
/* clear all status bits */
- iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+ csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status));
}
void msgdma_disable_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_enable_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_disable_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_enable_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+ tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control),
+ MSGDMA_CSR_CTL_GLOBAL_INTR);
}
void msgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->rx_dma_csr;
- iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+ csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status));
}
void msgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct msgdma_csr *csr = priv->tx_dma_csr;
- iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+ csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status));
}
/* return 0 to indicate transmit is pending */
int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
- struct msgdma_extended_desc *desc = priv->tx_dma_desc;
-
- iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
- iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
- iowrite32(0, &desc->write_addr_lo);
- iowrite32(0, &desc->write_addr_hi);
- iowrite32(buffer->len, &desc->len);
- iowrite32(0, &desc->burst_seq_num);
- iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
- iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+ csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+ msgdma_descroffs(read_addr_lo));
+ csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc,
+ msgdma_descroffs(read_addr_hi));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi));
+ csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len));
+ csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num));
+ csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc,
+ msgdma_descroffs(stride));
+ csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc,
+ msgdma_descroffs(control));
return 0;
}
@@ -136,16 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
u32 ready = 0;
u32 inuse;
u32 status;
- struct msgdma_csr *txcsr = priv->tx_dma_csr;
/* Get number of sent descriptors */
- inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+ inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level))
+ & 0xffff;
if (inuse) { /* Tx FIFO is not empty */
ready = priv->tx_prod - priv->tx_cons - inuse - 1;
} else {
/* Check for buffered last packet */
- status = ioread32(&txcsr->status);
+ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
if (status & MSGDMA_CSR_STAT_BUSY)
ready = priv->tx_prod - priv->tx_cons - 1;
else
@@ -159,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
void msgdma_add_rx_desc(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct msgdma_extended_desc *desc = priv->rx_dma_desc;
u32 len = priv->rx_dma_buf_sz;
dma_addr_t dma_addr = rxbuffer->dma_addr;
u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
@@ -169,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv,
| MSGDMA_DESC_CTL_TR_ERR_IRQ
| MSGDMA_DESC_CTL_GO);
- iowrite32(0, &desc->read_addr_lo);
- iowrite32(0, &desc->read_addr_hi);
- iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
- iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
- iowrite32(len, &desc->len);
- iowrite32(0, &desc->burst_seq_num);
- iowrite32(0x00010001, &desc->stride);
- iowrite32(control, &desc->control);
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo));
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi));
+ csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc,
+ msgdma_descroffs(write_addr_lo));
+ csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc,
+ msgdma_descroffs(write_addr_hi));
+ csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len));
+ csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num));
+ csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride));
+ csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control));
}
/* status is returned on upper 16 bits,
@@ -187,12 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv)
u32 rxstatus = 0;
u32 pktlength;
u32 pktstatus;
- struct msgdma_csr *rxcsr = priv->rx_dma_csr;
- struct msgdma_response *rxresp = priv->rx_dma_resp;
- if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
- pktlength = ioread32(&rxresp->bytes_transferred);
- pktstatus = ioread32(&rxresp->status);
+ if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level))
+ & 0xffff) {
+ pktlength = csrrd32(priv->rx_dma_resp,
+ msgdma_respoffs(bytes_transferred));
+ pktstatus = csrrd32(priv->rx_dma_resp,
+ msgdma_respoffs(status));
rxstatus = pktstatus;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index d7b59ba4019c..e335626e1b6b 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -17,15 +17,6 @@
#ifndef __ALTERA_MSGDMAHW_H__
#define __ALTERA_MSGDMAHW_H__
-/* mSGDMA standard descriptor format
- */
-struct msgdma_desc {
- u32 read_addr; /* data buffer source address */
- u32 write_addr; /* data buffer destination address */
- u32 len; /* the number of bytes to transfer per descriptor */
- u32 control; /* characteristics of the transfer */
-};
-
/* mSGDMA extended descriptor format
*/
struct msgdma_extended_desc {
@@ -159,6 +150,10 @@ struct msgdma_response {
u32 status;
};
+#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a))
+#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a))
+#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a))
+
/* mSGDMA response register bit definitions
*/
#define MSGDMA_RESP_EARLY_TERM BIT(8)
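The hunks above replace dereferences of __iomem-mapped register structs (e.g. ioread32(&csr->status)) with byte offsets computed by offsetof(), so the hardware layout structs only describe offsets and are never dereferenced. A minimal sketch of the pattern, with a hypothetical example_regs layout and regrd32()/regwr32() helpers standing in for the driver's csrrd32()/csrwr32():

#include <linux/io.h>
#include <linux/stddef.h>	/* offsetof() */
#include <linux/types.h>

struct example_regs {		/* describes the register layout only */
	u32 control;
	u32 status;
};

#define example_offs(f)	(offsetof(struct example_regs, f))

static inline u32 regrd32(void __iomem *base, size_t offs)
{
	return readl(base + offs);	/* no __iomem struct dereference */
}

static inline void regwr32(u32 val, void __iomem *base, size_t offs)
{
	writel(val, base + offs);
}

/* e.g.: u32 sts = regrd32(base, example_offs(status)); */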
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index dbd40e15b5cc..580553d42d34 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+ struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
int wfixed);
static int sgdma_async_write(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static int sgdma_async_read(struct altera_tse_private *priv);
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc);
+ struct sgdma_descrip __iomem *desc);
static int sgdma_txbusy(struct altera_tse_private *priv);
@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
priv->rxdescphys = (dma_addr_t) 0;
priv->txdescphys = (dma_addr_t) 0;
- priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+ priv->rxdescphys = dma_map_single(priv->device,
+ (void __force *)priv->rx_dma_desc,
priv->rxdescmem, DMA_BIDIRECTIONAL);
if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
return -EINVAL;
}
- priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+ priv->txdescphys = dma_map_single(priv->device,
+ (void __force *)priv->tx_dma_desc,
priv->txdescmem, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
}
/* Initialize descriptor memory to all 0's, sync memory to cache */
- memset(priv->tx_dma_desc, 0, priv->txdescmem);
- memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+ memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
*/
void sgdma_reset(struct altera_tse_private *priv)
{
- u32 *ptxdescripmem = priv->tx_dma_desc;
- u32 txdescriplen = priv->txdescmem;
- u32 *prxdescripmem = priv->rx_dma_desc;
- u32 rxdescriplen = priv->rxdescmem;
- struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
- struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
-
/* Initialize descriptor memory to 0 */
- memset(ptxdescripmem, 0, txdescriplen);
- memset(prxdescripmem, 0, rxdescriplen);
+ memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+ memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
- iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
- iowrite32(0, &ptxsgdma->control);
+ csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
- iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
- iowrite32(0, &prxsgdma->control);
+ csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
}
/* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->rx_dma_csr;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+ tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+ SGDMA_CTRLREG_CLRINT);
}
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->tx_dma_csr;
- tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+ tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+ SGDMA_CTRLREG_CLRINT);
}
/* transmits buffer through SGDMA. Returns number of buffers
@@ -184,11 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
*/
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
- int pktstx = 0;
- struct sgdma_descrip *descbase = priv->tx_dma_desc;
+ struct sgdma_descrip __iomem *descbase =
+ (struct sgdma_descrip __iomem *)priv->tx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0];
- struct sgdma_descrip *ndesc = &descbase[1];
+ struct sgdma_descrip __iomem *cdesc = &descbase[0];
+ struct sgdma_descrip __iomem *ndesc = &descbase[1];
/* wait 'til the tx sgdma is ready for the next transmit request */
if (sgdma_txbusy(priv))
@@ -204,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
0, /* read fixed */
SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
- pktstx = sgdma_async_write(priv, cdesc);
+ sgdma_async_write(priv, cdesc);
/* enqueue the request to the pending transmit queue */
queue_tx(priv, buffer);
@@ -218,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
u32 ready = 0;
- struct sgdma_descrip *desc = priv->tx_dma_desc;
if (!sgdma_txbusy(priv) &&
- ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+ ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+ & SGDMA_CONTROL_HW_OWNED) == 0) &&
(dequeue_tx(priv))) {
ready = 1;
}
@@ -245,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
*/
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->rx_dma_csr;
- struct sgdma_descrip *base = priv->rx_dma_desc;
- struct sgdma_descrip *desc = NULL;
- int pktsrx;
- unsigned int rxstatus = 0;
- unsigned int pktlength = 0;
- unsigned int pktstatus = 0;
+ struct sgdma_descrip __iomem *base =
+ (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+ struct sgdma_descrip __iomem *desc = NULL;
struct tse_buffer *rxbuffer = NULL;
+ unsigned int rxstatus = 0;
- u32 sts = ioread32(&csr->status);
+ u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
desc = &base[0];
if (sts & SGDMA_STSREG_EOP) {
+ unsigned int pktlength = 0;
+ unsigned int pktstatus = 0;
dma_sync_single_for_cpu(priv->device,
priv->rxdescphys,
priv->sgdmadesclen,
DMA_FROM_DEVICE);
- pktlength = desc->bytes_xferred;
- pktstatus = desc->status & 0x3f;
- rxstatus = pktstatus;
+ pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+ pktstatus = csrrd8(desc, sgdma_descroffs(status));
+ rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
rxstatus = rxstatus << 16;
rxstatus |= (pktlength & 0xffff);
if (rxstatus) {
- desc->status = 0;
+ csrwr8(0, desc, sgdma_descroffs(status));
rxbuffer = dequeue_rx(priv);
if (rxbuffer == NULL)
@@ -278,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
"sgdma rx and rx queue empty!\n");
/* Clear control */
- iowrite32(0, &csr->control);
+ csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
/* clear status */
- iowrite32(0xf, &csr->status);
+ csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
/* kick the rx sgdma after reaping this descriptor */
- pktsrx = sgdma_async_read(priv);
+ sgdma_async_read(priv);
} else {
/* If the SGDMA indicated an end of packet on recv,
@@ -297,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
*/
netdev_err(priv->dev,
"SGDMA RX Error Info: %x, %x, %x\n",
- sts, desc->status, rxstatus);
+ sts, csrrd8(desc, sgdma_descroffs(status)),
+ rxstatus);
}
} else if (sts == 0) {
- pktsrx = sgdma_async_read(priv);
+ sgdma_async_read(priv);
}
return rxstatus;
@@ -308,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
/* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
- struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+ struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
@@ -319,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
int wfixed)
{
/* Clear the next descriptor as not owned by hardware */
- u32 ctrl = ndesc->control;
+
+ u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
ctrl &= ~SGDMA_CONTROL_HW_OWNED;
- ndesc->control = ctrl;
+ csrwr8(ctrl, ndesc, sgdma_descroffs(control));
- ctrl = 0;
ctrl = SGDMA_CONTROL_HW_OWNED;
ctrl |= generate_eop;
ctrl |= rfixed;
ctrl |= wfixed;
/* Channel is implicitly zero, initialized to 0 by default */
-
- desc->raddr = raddr;
- desc->waddr = waddr;
- desc->next = lower_32_bits(ndesc_phys);
- desc->control = ctrl;
- desc->status = 0;
- desc->rburst = 0;
- desc->wburst = 0;
- desc->bytes = length;
- desc->bytes_xferred = 0;
+ csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+ csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+ csrwr32(0, desc, sgdma_descroffs(pad1));
+ csrwr32(0, desc, sgdma_descroffs(pad2));
+ csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+ csrwr8(ctrl, desc, sgdma_descroffs(control));
+ csrwr8(0, desc, sgdma_descroffs(status));
+ csrwr8(0, desc, sgdma_descroffs(wburst));
+ csrwr8(0, desc, sgdma_descroffs(rburst));
+ csrwr16(length, desc, sgdma_descroffs(bytes));
+ csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
}
/* If hardware is busy, don't restart async read.
@@ -350,11 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
*/
static int sgdma_async_read(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->rx_dma_csr;
- struct sgdma_descrip *descbase = priv->rx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0];
- struct sgdma_descrip *ndesc = &descbase[1];
+ struct sgdma_descrip __iomem *descbase =
+ (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+ struct sgdma_descrip __iomem *cdesc = &descbase[0];
+ struct sgdma_descrip __iomem *ndesc = &descbase[1];
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
@@ -379,11 +377,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
priv->sgdmadesclen,
DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
- &csr->next_descrip);
+ csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+ priv->rx_dma_csr,
+ sgdma_csroffs(next_descrip));
- iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
- &csr->control);
+ csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+ priv->rx_dma_csr,
+ sgdma_csroffs(control));
return 1;
}
@@ -392,32 +392,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
}
static int sgdma_async_write(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
- struct sgdma_csr *csr = priv->tx_dma_csr;
-
if (sgdma_txbusy(priv))
return 0;
/* clear control and status */
- iowrite32(0, &csr->control);
- iowrite32(0x1f, &csr->status);
+ csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+ csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys,
priv->sgdmadesclen, DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
- &csr->next_descrip);
+ csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+ priv->tx_dma_csr,
+ sgdma_csroffs(next_descrip));
- iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
- &csr->control);
+ csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+ priv->tx_dma_csr,
+ sgdma_csroffs(control));
return 1;
}
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
dma_addr_t paddr = priv->txdescmem_busaddr;
uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -426,7 +426,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,
static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
- struct sgdma_descrip *desc)
+ struct sgdma_descrip __iomem *desc)
{
dma_addr_t paddr = priv->rxdescmem_busaddr;
uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -515,8 +515,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
*/
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
- struct sgdma_csr *csr = priv->rx_dma_csr;
- return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+ return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY;
}
/* waits for the tx sgdma to finish its current operation, returns 0
@@ -525,13 +525,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- struct sgdma_csr *csr = priv->tx_dma_csr;
/* if DMA is busy, wait for current transaction to finish */
- while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+ while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
- if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+ if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+ & SGDMA_STSREG_BUSY) {
netdev_err(priv->dev, "timeout waiting for tx dma\n");
return 1;
}
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index ba3334f35383..85bc33b218d9 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -19,16 +19,16 @@
/* SGDMA descriptor structure */
struct sgdma_descrip {
- unsigned int raddr; /* address of data to be read */
- unsigned int pad1;
- unsigned int waddr;
- unsigned int pad2;
- unsigned int next;
- unsigned int pad3;
- unsigned short bytes;
- unsigned char rburst;
- unsigned char wburst;
- unsigned short bytes_xferred; /* 16 bits, bytes xferred */
+ u32 raddr; /* address of data to be read */
+ u32 pad1;
+ u32 waddr;
+ u32 pad2;
+ u32 next;
+ u32 pad3;
+ u16 bytes;
+ u8 rburst;
+ u8 wburst;
+ u16 bytes_xferred; /* 16 bits, bytes xferred */
/* bit 0: error
* bit 1: length error
@@ -39,7 +39,7 @@ struct sgdma_descrip {
* bit 6: reserved
* bit 7: status eop for recv case
*/
- unsigned char status;
+ u8 status;
/* bit 0: eop
* bit 1: read_fixed
@@ -47,7 +47,7 @@ struct sgdma_descrip {
* bits 3,4,5,6: Channel (always 0)
* bit 7: hardware owned
*/
- unsigned char control;
+ u8 control;
} __packed;
@@ -101,6 +101,8 @@ struct sgdma_csr {
u32 pad3[3];
};
+#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a))
+#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a))
#define SGDMA_STSREG_ERR BIT(0) /* Error */
#define SGDMA_STSREG_EOP BIT(1) /* EOP */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 465c4aabebbd..2adb24d4523c 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -357,6 +357,8 @@ struct altera_tse_mac {
u32 reserved5[42];
};
+#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a))
+
/* Transmit and Receive Command Registers Bit Definitions
*/
#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
@@ -487,4 +489,49 @@ struct altera_tse_private {
*/
void altera_tse_set_ethtool_ops(struct net_device *);
+static inline
+u32 csrrd32(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readl(paddr);
+}
+
+static inline
+u16 csrrd16(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readw(paddr);
+}
+
+static inline
+u8 csrrd8(void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+ return readb(paddr);
+}
+
+static inline
+void csrwr32(u32 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writel(val, paddr);
+}
+
+static inline
+void csrwr16(u16 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writew(val, paddr);
+}
+
+static inline
+void csrwr8(u8 val, void __iomem *mac, size_t offs)
+{
+ void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs);
+
+ writeb(val, paddr);
+}
+
#endif /* __ALTERA_TSE_H__ */
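A hedged usage sketch of the accessors defined above, combining csrrd32()/csrwr32() with tse_csroffs() for a read-modify-write of command_config (the field and the MAC_CMDCFG_TX_ENA bit both appear elsewhere in this patch); this is the same pattern tse_set_bit()/tse_clear_bit() wrap further down in altera_utils.c:

static void example_enable_tx(struct altera_tse_private *priv)
{
	u32 cfg = csrrd32(priv->mac_dev, tse_csroffs(command_config));

	cfg |= MAC_CMDCFG_TX_ENA;
	csrwr32(cfg, priv->mac_dev, tse_csroffs(command_config));
}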
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index d817e285b266..be72e1e64525 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
u64 *buf)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
u64 ext;
- buf[0] = ioread32(&mac->frames_transmitted_ok);
- buf[1] = ioread32(&mac->frames_received_ok);
- buf[2] = ioread32(&mac->frames_check_sequence_errors);
- buf[3] = ioread32(&mac->alignment_errors);
+ buf[0] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_transmitted_ok));
+ buf[1] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_received_ok));
+ buf[2] = csrrd32(priv->mac_dev,
+ tse_csroffs(frames_check_sequence_errors));
+ buf[3] = csrrd32(priv->mac_dev,
+ tse_csroffs(alignment_errors));
/* Extended aOctetsTransmittedOK counter */
- ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
- ext |= ioread32(&mac->octets_transmitted_ok);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_octets_transmitted_ok)) << 32;
+
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(octets_transmitted_ok));
buf[4] = ext;
/* Extended aOctetsReceivedOK counter */
- ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
- ext |= ioread32(&mac->octets_received_ok);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_octets_received_ok)) << 32;
+
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(octets_received_ok));
buf[5] = ext;
- buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
- buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
- buf[8] = ioread32(&mac->if_in_errors);
- buf[9] = ioread32(&mac->if_out_errors);
- buf[10] = ioread32(&mac->if_in_ucast_pkts);
- buf[11] = ioread32(&mac->if_in_multicast_pkts);
- buf[12] = ioread32(&mac->if_in_broadcast_pkts);
- buf[13] = ioread32(&mac->if_out_discards);
- buf[14] = ioread32(&mac->if_out_ucast_pkts);
- buf[15] = ioread32(&mac->if_out_multicast_pkts);
- buf[16] = ioread32(&mac->if_out_broadcast_pkts);
- buf[17] = ioread32(&mac->ether_stats_drop_events);
+ buf[6] = csrrd32(priv->mac_dev,
+ tse_csroffs(tx_pause_mac_ctrl_frames));
+ buf[7] = csrrd32(priv->mac_dev,
+ tse_csroffs(rx_pause_mac_ctrl_frames));
+ buf[8] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_errors));
+ buf[9] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_errors));
+ buf[10] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_ucast_pkts));
+ buf[11] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_multicast_pkts));
+ buf[12] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_in_broadcast_pkts));
+ buf[13] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_discards));
+ buf[14] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_ucast_pkts));
+ buf[15] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_multicast_pkts));
+ buf[16] = csrrd32(priv->mac_dev,
+ tse_csroffs(if_out_broadcast_pkts));
+ buf[17] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_drop_events));
/* Extended etherStatsOctets counter */
- ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
- ext |= ioread32(&mac->ether_stats_octets);
+ ext = (u64) csrrd32(priv->mac_dev,
+ tse_csroffs(msb_ether_stats_octets)) << 32;
+ ext |= csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_octets));
buf[18] = ext;
- buf[19] = ioread32(&mac->ether_stats_pkts);
- buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
- buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
- buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
- buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
- buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
- buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
- buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
- buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
- buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
- buf[29] = ioread32(&mac->ether_stats_jabbers);
- buf[30] = ioread32(&mac->ether_stats_fragments);
+ buf[19] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts));
+ buf[20] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_undersize_pkts));
+ buf[21] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_oversize_pkts));
+ buf[22] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_64_octets));
+ buf[23] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_65to127_octets));
+ buf[24] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_128to255_octets));
+ buf[25] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_256to511_octets));
+ buf[26] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_512to1023_octets));
+ buf[27] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_1024to1518_octets));
+ buf[28] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_pkts_1519tox_octets));
+ buf[29] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_jabbers));
+ buf[30] = csrrd32(priv->mac_dev,
+ tse_csroffs(ether_stats_fragments));
}
static int tse_sset_count(struct net_device *dev, int sset)
@@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
{
int i;
struct altera_tse_private *priv = netdev_priv(dev);
- u32 *tse_mac_regs = (u32 *)priv->mac_dev;
u32 *buf = regbuf;
/* Set version to a known value, so ethtool knows
@@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (i = 0; i < TSE_NUM_REGS; i++)
- buf[i] = ioread32(&tse_mac_regs[i]);
+ buf[i] = csrrd32(priv->mac_dev, i * 4);
}
static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index e44a4aeb9701..7330681574d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
- unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
- u32 data;
+ struct net_device *ndev = bus->priv;
+ struct altera_tse_private *priv = netdev_priv(ndev);
/* set MDIO address */
- iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+ csrwr32((mii_id & 0x1f), priv->mac_dev,
+ tse_csroffs(mdio_phy0_addr));
/* get the data */
- data = ioread32(&mdio_regs[regnum]) & 0xffff;
- return data;
+ return csrrd32(priv->mac_dev,
+ tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}
static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
- struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
- unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+ struct net_device *ndev = bus->priv;
+ struct altera_tse_private *priv = netdev_priv(ndev);
/* set MDIO address */
- iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+ csrwr32((mii_id & 0x1f), priv->mac_dev,
+ tse_csroffs(mdio_phy0_addr));
/* write the data */
- iowrite32((u32) value, &mdio_regs[regnum]);
+ csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
return 0;
}
@@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
for (i = 0; i < PHY_MAX_ADDR; i++)
mdio->irq[i] = PHY_POLL;
- mdio->priv = priv->mac_dev;
+ mdio->priv = dev;
mdio->parent = priv->device;
ret = of_mdiobus_register(mdio, mdio_node);
@@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
enum netdev_tx ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
- int txcomplete = 0;
spin_lock_bh(&priv->tx_lock);
@@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_sync_single_for_device(priv->device, buffer->dma_addr,
buffer->len, DMA_TO_DEVICE);
- txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+ priv->dmaops->tx_buffer(priv, buffer);
skb_tx_timestamp(skb);
@@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
struct altera_tse_private *priv = netdev_priv(dev);
struct phy_device *phydev = NULL;
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
- int ret;
if (priv->phy_addr != POLL_PHY) {
snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
@@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
netdev_err(dev, "Could not attach to PHY\n");
} else {
+ int ret;
phydev = phy_find_first(priv->mdio);
if (phydev == NULL) {
netdev_err(dev, "No PHY found\n");
@@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev)
static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
- struct altera_tse_mac *mac = priv->mac_dev;
u32 msb;
u32 lsb;
@@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
/* Set primary MAC address */
- iowrite32(msb, &mac->mac_addr_0);
- iowrite32(lsb, &mac->mac_addr_1);
+ csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
+ csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}
/* MAC software reset.
@@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
*/
static int reset_mac(struct altera_tse_private *priv)
{
- void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
int counter;
u32 dat;
- dat = ioread32(cmd_cfg_reg);
+ dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
- iowrite32(dat, cmd_cfg_reg);
+ csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
counter = 0;
while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+ if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_SW_RESET))
break;
udelay(1);
}
if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
- dat = ioread32(cmd_cfg_reg);
+ dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
dat &= ~MAC_CMDCFG_SW_RESET;
- iowrite32(dat, cmd_cfg_reg);
+ csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
return -1;
}
return 0;
@@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv)
*/
static int init_mac(struct altera_tse_private *priv)
{
- struct altera_tse_mac *mac = priv->mac_dev;
unsigned int cmd = 0;
u32 frm_length;
/* Setup Rx FIFO */
- iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
- &mac->rx_section_empty);
- iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
- iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
- iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+ csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+ priv->mac_dev, tse_csroffs(rx_section_empty));
+
+ csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
+ tse_csroffs(rx_section_full));
+
+ csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
+ tse_csroffs(rx_almost_empty));
+
+ csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
+ tse_csroffs(rx_almost_full));
/* Setup Tx FIFO */
- iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
- &mac->tx_section_empty);
- iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
- iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
- iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+ csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+ priv->mac_dev, tse_csroffs(tx_section_empty));
+
+ csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
+ tse_csroffs(tx_section_full));
+
+ csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
+ tse_csroffs(tx_almost_empty));
+
+ csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
+ tse_csroffs(tx_almost_full));
/* MAC Address Configuration */
tse_update_mac_addr(priv, priv->dev->dev_addr);
/* MAC Function Configuration */
frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
- iowrite32(frm_length, &mac->frm_length);
- iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+ csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));
+
+ csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
+ tse_csroffs(tx_ipg_length));
/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
* start address
*/
- tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
- tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
- ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+ tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
+ ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+
+ tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
+ ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+ ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
/* Set the MAC options */
- cmd = ioread32(&mac->command_config);
+ cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */
cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
@@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv)
cmd &= ~MAC_CMDCFG_ETH_SPEED;
cmd &= ~MAC_CMDCFG_ENA_10;
- iowrite32(cmd, &mac->command_config);
+ csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));
- iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+ csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
+ tse_csroffs(pause_quanta));
if (netif_msg_hw(priv))
dev_dbg(priv->device,
@@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv)
*/
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
- struct altera_tse_mac *mac = priv->mac_dev;
- u32 value = ioread32(&mac->command_config);
+ u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));
if (enable)
value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
else
value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
- iowrite32(value, &mac->command_config);
+ csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}
/* Change the MTU
@@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
int i;
struct netdev_hw_addr *ha;
/* clear the hash filter */
for (i = 0; i < 64; i++)
- iowrite32(0, &(mac->hash_table[i]));
+ csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
netdev_for_each_mc_addr(ha, dev) {
unsigned int hash = 0;
@@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
hash = (hash << 1) | xor_bit;
}
- iowrite32(1, &(mac->hash_table[hash]));
+ csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
}
}
@@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev)
static void altera_tse_set_mcfilterall(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
int i;
/* set the hash filter */
for (i = 0; i < 64; i++)
- iowrite32(1, &(mac->hash_table[i]));
+ csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
/* Set or clear the multicast filter for this adaptor
@@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
spin_lock(&priv->mac_cfg_lock);
if (dev->flags & IFF_PROMISC)
- tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
if (dev->flags & IFF_ALLMULTI)
altera_tse_set_mcfilterall(dev);
@@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
static void tse_set_rx_mode(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct altera_tse_mac *mac = priv->mac_dev;
spin_lock(&priv->mac_cfg_lock);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
!netdev_mc_empty(dev) || !netdev_uc_empty(dev))
- tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
else
- tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+ tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
+ MAC_CMDCFG_PROMIS_EN);
spin_unlock(&priv->mac_cfg_lock);
}
@@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev)
of_property_read_bool(pdev->dev.of_node,
"altr,has-hash-multicast-filter");
+ /* Disable the hash filter for now until the
+ * multicast filter receive issue is debugged
+ */
+ priv->hash_filter = 0;
+
/* get supplemental address settings for this instance */
priv->added_unicast =
of_property_read_bool(pdev->dev.of_node,
@@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev)
return 0;
}
-struct altera_dmaops altera_dtype_sgdma = {
+static const struct altera_dmaops altera_dtype_sgdma = {
.altera_dtype = ALTERA_DTYPE_SGDMA,
.dmamask = 32,
.reset_dma = sgdma_reset,
@@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = {
.start_rxdma = sgdma_start_rxdma,
};
-struct altera_dmaops altera_dtype_msgdma = {
+static const struct altera_dmaops altera_dtype_msgdma = {
.altera_dtype = ALTERA_DTYPE_MSGDMA,
.dmamask = 64,
.reset_dma = msgdma_reset,
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
index 70fa13f486b2..d7eeb1713ad2 100644
--- a/drivers/net/ethernet/altera/altera_utils.c
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -17,28 +17,28 @@
#include "altera_tse.h"
#include "altera_utils.h"
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
value |= bit_mask;
- iowrite32(value, ioaddr);
+ csrwr32(value, ioaddr, offs);
}
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
value &= ~bit_mask;
- iowrite32(value, ioaddr);
+ csrwr32(value, ioaddr, offs);
}
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
return (value & bit_mask) ? 1 : 0;
}
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask)
{
- u32 value = ioread32(ioaddr);
+ u32 value = csrrd32(ioaddr, offs);
return (value & bit_mask) ? 0 : 1;
}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index ce1db36d3583..baf100ccf587 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -19,9 +19,9 @@
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
-void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
-void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
-int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask);
#endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 8d0479d5be8e..02a23c2901c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2696,7 +2696,7 @@ out:
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return 0;
+ return rc;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 0c067e8564dd..784c7155b98a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* request pf to config rss table for vf queues*/
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4693d004a223..e1d445dd8564 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4957,6 +4957,12 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ /* On some BE3 FW versions, after a HW reset,
+ * interrupts will remain disabled for each function.
+ * So, explicitly enable interrupts
+ */
+ be_intr_set(adapter, true);
+
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 6e664d9038d6..b78378cea5e3 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2027,14 +2027,14 @@ jme_fill_tx_map(struct pci_dev *pdev,
return 0;
}
-static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx)
+static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
{
struct jme_ring *txring = &(jme->txring[0]);
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
int mask = jme->tx_ring_mask;
int j;
- for (j = startidx ; j < endidx ; ++j) {
+ for (j = 0 ; j < count ; j++) {
ctxbi = txbi + ((startidx + j + 2) & (mask));
pci_unmap_page(jme->pdev,
ctxbi->mapping,
@@ -2069,7 +2069,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
skb_frag_page(frag),
frag->page_offset, skb_frag_size(frag), hidma);
if (ret) {
- jme_drop_tx_map(jme, idx, idx+i);
+ jme_drop_tx_map(jme, idx, i);
goto out;
}
@@ -2081,7 +2081,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
offset_in_page(skb->data), len, hidma);
if (ret)
- jme_drop_tx_map(jme, idx, idx+i);
+ jme_drop_tx_map(jme, idx, i);
out:
return ret;
@@ -2269,7 +2269,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (jme_fill_tx_desc(jme, skb, idx))
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
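The jme_drop_tx_map() change fixes the partial-mapping error path: the old loop ran j from startidx to endidx while the slot computation already added startidx, so startidx was counted twice and the wrong ring entries were unmapped (with startidx = 5 and three mapped fragments it touched slots (5+5+2), (5+6+2), (5+7+2) instead of (5+0+2)..(5+2+2), modulo the ring mask). Passing the number of fragments actually mapped and starting j at 0 undoes exactly those mappings. A minimal sketch with a hypothetical unmap_slot() helper:

/* Sketch only - same index math as the fixed loop above. */
static void drop_partial_map(int startidx, int count, int mask)
{
	int j;

	for (j = 0; j < count; j++)
		unmap_slot((startidx + j + 2) & mask);	/* hypothetical helper */
}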
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index ca8e7cb5a8e4..a89e46430c74 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1262,12 +1262,12 @@ static struct mlx4_cmd_info cmd_info[] = {
},
{
.opcode = MLX4_CMD_UPDATE_QP,
- .has_inbox = false,
+ .has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = mlx4_CMD_EPERM_wrapper
+ .wrapper = mlx4_UPDATE_QP_wrapper
},
{
.opcode = MLX4_CMD_GET_OP_REQ,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 52c1e7da74c4..9dd1b30ea757 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1196,6 +1196,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 9bdb6aeb3721..1d3234a6744d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -389,6 +389,41 @@ err_icm:
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ enum mlx4_update_qp_attr attr,
+ struct mlx4_update_qp_params *params)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_update_qp_context *cmd;
+ u64 pri_addr_path_mask = 0;
+ int err = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ cmd = (struct mlx4_update_qp_context *)mailbox->buf;
+
+ if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
+ return -EINVAL;
+
+ if (attr & MLX4_UPDATE_QP_SMAC) {
+ pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
+ cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
+ }
+
+ cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+
+ err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_update_qp);
+
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
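mlx4_update_qp() is a new export wrapping MLX4_CMD_UPDATE_QP; for now only the MLX4_UPDATE_QP_SMAC attribute is accepted, and it stores the source-MAC index in pri_path.grh_mylmc. A hedged caller-side sketch (the qp and new_smac_ix values are illustrative, not from this patch):

struct mlx4_update_qp_params params = { .smac_index = new_smac_ix };
int err;

err = mlx4_update_qp(dev, &qp, MLX4_UPDATE_QP_SMAC, &params);
if (err)
	mlx4_err(dev, "failed to update smac for qpn 0x%x (%d)\n",
		 qp.qpn, err);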
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 4094e11f9d4d..dd821b363686 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd_info)
+{
+ int err;
+ u32 qpn = vhcr->in_modifier & 0xffffff;
+ struct res_qp *rqp;
+ u64 mac;
+ unsigned port;
+ u64 pri_addr_path_mask;
+ struct mlx4_update_qp_context *cmd;
+ int smac_index;
+
+ cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+ pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+ if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+ (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+ return -EPERM;
+
+ /* Just change the smac for the QP */
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err) {
+ mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+ return err;
+ }
+
+ port = (rqp->sched_queue >> 6 & 1) + 1;
+ smac_index = cmd->qp_context.pri_path.grh_mylmc;
+ err = mac_find_smac_ix_in_slave(dev, slave, port,
+ smac_index, &mac);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+ qpn, smac_index);
+ goto err_mac;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma,
+ vhcr->in_modifier, 0,
+ MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+ goto err_mac;
+ }
+
+err_mac:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
efx->net_dev->rx_cpu_rmap = NULL;
#endif
- /* Disable MSI/MSI-X interrupts */
- efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
- /* Disable legacy interrupt */
- if (efx->legacy_irq)
+ if (EFX_INT_MODE_USE_MSI(efx)) {
+ /* Disable MSI/MSI-X interrupts */
+ efx_for_each_channel(channel, efx)
+ free_irq(channel->irq,
+ &efx->msi_context[channel->channel]);
+ } else {
+ /* Disable legacy interrupt */
free_irq(efx->legacy_irq, efx);
+ }
}
/* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 93cf4f63f426..110ca1c766d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
if (ret) {
pr_err("%s: Cannot attach to PHY (error: %d)\n",
__func__, ret);
- goto phy_error;
+ return ret;
}
}
@@ -1779,8 +1779,6 @@ init_error:
dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
-phy_error:
- clk_disable_unprepare(priv->stmmac_clk);
return ret;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f4701da19a02..a665e902b989 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -528,8 +528,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ }
}
static void macvlan_set_mac_lists(struct net_device *dev)
@@ -585,6 +587,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+ return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
static void macvlan_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
@@ -595,8 +602,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
static void macvlan_set_lockdep_class(struct net_device *dev)
{
- lockdep_set_class(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key);
+ lockdep_set_class_and_subclass(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key,
+ macvlan_get_nest_level(dev));
netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
}
@@ -790,6 +798,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_fdb_add = macvlan_fdb_add,
.ndo_fdb_del = macvlan_fdb_del,
.ndo_fdb_dump = ndo_dflt_fdb_dump,
+ .ndo_get_lock_subclass = macvlan_get_nest_level,
};
void macvlan_common_setup(struct net_device *dev)
@@ -922,6 +931,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
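The macvlan change gives each nesting depth of stacked macvlans its own lockdep subclass for addr_list_lock: the depth is recorded when the device is linked and reported back to the core via ndo_get_lock_subclass, which avoids false lockdep reports when an upper macvlan syncs addresses to the device below it. Putting the pieces from the hunks above together:

/* recorded at newlink time ... */
vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;

/* ... and used as this device's subclass for the shared lock class */
lockdep_set_class_and_subclass(&dev->addr_list_lock,
			       &macvlan_netdev_addr_lock_key,
			       macvlan_get_nest_level(dev));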
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
- int needs_aneg = 0, do_suspend = 0;
+ bool needs_aneg = false, do_suspend = false, do_resume = false;
int err = 0;
mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
case PHY_PENDING:
break;
case PHY_UP:
- needs_aneg = 1;
+ needs_aneg = true;
phydev->link_timeout = PHY_AN_TIMEOUT;
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
} else if (0 == phydev->link_timeout--)
- needs_aneg = 1;
+ needs_aneg = true;
break;
case PHY_NOLINK:
err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
netif_carrier_on(phydev->attached_dev);
} else {
if (0 == phydev->link_timeout--)
- needs_aneg = 1;
+ needs_aneg = true;
}
phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
phydev->link = 0;
netif_carrier_off(phydev->attached_dev);
phydev->adjust_link(phydev->attached_dev);
- do_suspend = 1;
+ do_suspend = true;
}
break;
case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
}
phydev->adjust_link(phydev->attached_dev);
}
+ do_resume = true;
break;
}
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
if (needs_aneg)
err = phy_start_aneg(phydev);
-
- if (do_suspend)
+ else if (do_suspend)
phy_suspend(phydev);
+ else if (do_resume)
+ phy_resume(phydev);
if (err < 0)
phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index eb3b946bd8c0..35d753d22f78 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -615,8 +615,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
err = phy_init_hw(phydev);
if (err)
phy_detach(phydev);
-
- phy_resume(phydev);
+ else
+ phy_resume(phydev);
return err;
}
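Taken together, the phy.c hunks turn needs_aneg/do_suspend into booleans, add do_resume for the PHY_RESUMING case, and make the three actions mutually exclusive, so each pass of the state machine ends with at most one of them:

if (needs_aneg)
	err = phy_start_aneg(phydev);
else if (do_suspend)
	phy_suspend(phydev);
else if (do_resume)
	phy_resume(phydev);

if (err < 0)
	phy_error(phydev);

The phy_device.c hunk applies the same discipline at attach time: phy_resume() is only called when phy_init_hw() succeeded.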
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
- bss_conf->enable_beacon)
+ bss_conf->enable_beacon) {
priv->reconfig_beacon = true;
+ priv->rearm_ani = true;
+ }
if (bss_conf->assoc) {
priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath9k_htc_ps_wakeup(priv);
+ ath9k_htc_stop_ani(priv);
del_timer_sync(&priv->tx.cleanup_timer);
ath9k_htc_tx_drain(priv);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
if (!err) {
/* only set 2G bandwidth using bw_cap command */
band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
- band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+ band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
sizeof(band_bwcap));
} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 8f4b03dbaf3f..4284672d0397 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) {
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+ BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
}
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+ bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
if (mvm->cfg->bt_shared_single_ant)
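The coex.c change is an accumulation fix: valid_bit_msk was assigned with '=' in the MPLUT block, which silently dropped the corunning-LUT bits set just above; '|=' lets both feature blocks contribute. The generic shape of the bug, with hypothetical names:

u32 valid = 0;

if (have_corunning)
	valid |= VALID_CORUN_BITS;
if (have_mplut)
	valid |= VALID_MPLUT_BIT;	/* a plain '=' here discards the bits above */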
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 6174c027ff59..6959fda3fe09 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -187,9 +187,9 @@ enum iwl_scan_type {
* this number of packets were received (typically 1)
* @passive2active: is auto switching from passive to active during scan allowed
* @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
* @suspend_time: how long to pause scan when returning to service channel:
- * bits 0-19: beacon interval in usecs (suspend before executing)
+ * bits 0-19: beacon interval in TUs (suspend before executing)
* bits 20-23: reserved
* bits 24-31: number of beacons (suspend between channels)
* @rxon_flags: RXON_FLG_*
@@ -387,8 +387,8 @@ enum scan_framework_client {
* @quiet_plcp_th: quiet channel num of packets threshold
* @good_CRC_th: passive to active promotion threshold
* @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of associated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of associated channel
+ * @suspend_time: pause scan this many TUs when returning to service channel
* @flags: RXON flags
* @filter_flags: RXONfilter
* @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 97c3deae6552..32682edfe5a4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1005,7 +1005,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
@@ -1021,7 +1021,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
return;
- ieee80211_iterate_active_interfaces(
+ ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_mc_iface_iterator, &iter_data);
}
@@ -1814,6 +1814,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_is_idle(mvm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
switch (mvm->scan_status) {
case IWL_MVM_SCAN_OS:
IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 17c42da5f9f2..107d864b3c0e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1015,6 +1015,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
return mvmvif->low_latency;
}
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index d44b2b33b5cc..857ddaf6f48c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1031,7 +1031,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
return;
}
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate */
if (lq_sta->dbg_fixed_rate) {
IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 63e7b16edb55..36ae01a18dee 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_bound);
- /*
- * Under low latency traffic passive scan is fragmented meaning
- * that dwell on a particular channel will be fragmented. Each fragment
- * dwell time is 20ms and fragments period is 105ms. Skipping to next
- * channel will be delayed by the same period - 105ms. So suspend_time
- * parameter describing both fragments and channels skipping periods is
- * set to 105ms. This value is chosen so that overall passive scan
- * duration will not be too long. Max_out_time in this case is set to
- * 70ms, so for active scanning operating channel will be left for 70ms
- * while for passive still for 20ms (fragment dwell).
- */
- if (global_bound) {
- if (!iwl_mvm_low_latency(mvm)) {
- params->suspend_time = ieee80211_tu_to_usec(100);
- params->max_out_time = ieee80211_tu_to_usec(600);
- } else {
- params->suspend_time = ieee80211_tu_to_usec(105);
- /* P2P doesn't support fragmented passive scan, so
- * configure max_out_time to be at least longest dwell
- * time for passive scan.
- */
- if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
- params->max_out_time = ieee80211_tu_to_usec(70);
- params->passive_fragmented = true;
- } else {
- u32 passive_dwell;
- /*
- * Use band G so that passive channel dwell time
- * will be assigned with maximum value.
- */
- band = IEEE80211_BAND_2GHZ;
- passive_dwell = iwl_mvm_get_passive_dwell(band);
- params->max_out_time =
- ieee80211_tu_to_usec(passive_dwell);
- }
- }
+ if (!global_bound)
+ goto not_bound;
+
+ params->suspend_time = 100;
+ params->max_out_time = 600;
+
+ if (iwl_mvm_low_latency(mvm)) {
+ params->suspend_time = 250;
+ params->max_out_time = 250;
}
+not_bound:
+
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
- if (params->passive_fragmented)
- params->dwell[band].passive = 20;
- else
- params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
+ params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
params->dwell[band].active = iwl_mvm_get_active_dwell(band,
n_ssids);
}
@@ -770,7 +741,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
- int tail = band_2ghz + band_5ghz;
+ int tail = band_2ghz + band_5ghz - 1;
u32 ssid_bitmap;
int cmd_len;
int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index c5f4532cafa9..eb2ca64820fc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -688,3 +688,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
return result;
}
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+ bool *idle = _data;
+
+ if (!vif->bss_conf.idle)
+ *idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+ bool idle = true;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_idle_iter, &idle);
+
+ return idle;
+}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f98ef1e62eb9..c76b148e1aba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1788,6 +1788,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
* PCI Tx retries from interfering with C3 CPU state */
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ trans->dev = &pdev->dev;
+ trans_pcie->pci_dev = pdev;
+ iwl_disable_interrupts(trans);
+
err = pci_enable_msi(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1799,8 +1803,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
- trans->dev = &pdev->dev;
- trans_pcie->pci_dev = pdev;
trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1826,8 +1828,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_pci_disable_msi;
}
- trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
@@ -1839,6 +1839,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_free_ict;
}
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
return trans;
out_free_ict:
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index a7557331699f..53cdcdf3dfa1 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) {
- int more_to_do = 0;
- unsigned long flags;
-
- /* It is necessary to disable IRQ before calling
- * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
- * lose event from the frontend.
- *
- * Consider:
- * RING_HAS_UNCONSUMED_REQUESTS
- * <frontend generates event to trigger napi_schedule>
- * __napi_complete
- *
- * This handler is still in scheduled state so the
- * event has no effect at all. After __napi_complete
- * this handler is descheduled and cannot get
- * scheduled again. We lose event in this case and the ring
- * will be completely stalled.
- */
-
- local_irq_save(flags);
-
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
- if (!more_to_do)
- __napi_complete(napi);
-
- local_irq_restore(flags);
+ napi_complete(napi);
+ xenvif_napi_schedule_or_enable_events(vif);
}
return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
enable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
enable_irq(vif->rx_irq);
- xenvif_check_rx_xenvif(vif);
+ xenvif_napi_schedule_or_enable_events(vif);
}
static void xenvif_down(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
/* Find the containing VIF's structure from a pointer in pending_tx_info array
*/
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page was found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+ const int i,
+ const struct ubuf_info *ubuf)
+{
+ struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+ do {
+ u16 pending_idx = ubuf->desc;
+
+ if (skb_shinfo(skb)->frags[i].page.p ==
+ foreign_vif->mmap_pages[pending_idx])
+ break;
+ ubuf = (struct ubuf_info *) ubuf->ctx;
+ } while (ubuf);
+
+ return ubuf;
+}
+
+/*
* Prepare an SKB to be transmitted to the frontend.
*
* This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
- grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
- struct xenvif *foreign_vif = NULL;
+ const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+ const struct ubuf_info *const head_ubuf = ubuf;
old_meta_prod = npo->meta_prod;
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
npo->copy_off = 0;
npo->copy_gref = req->gref;
- if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
- (ubuf->callback == &xenvif_zerocopy_callback)) {
- int i = 0;
- foreign_vif = ubuf_to_vif(ubuf);
-
- do {
- u16 pending_idx = ubuf->desc;
- foreign_grefs[i++] =
- foreign_vif->pending_tx_info[pending_idx].req.gref;
- ubuf = (struct ubuf_info *) ubuf->ctx;
- } while (ubuf);
- }
-
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
for (i = 0; i < nr_frags; i++) {
+ /* This variable also signals whether foreign_gref has a real
+ * value or not.
+ */
+ struct xenvif *foreign_vif = NULL;
+ grant_ref_t foreign_gref;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+ (ubuf->callback == &xenvif_zerocopy_callback)) {
+ const struct ubuf_info *const startpoint = ubuf;
+
+ /* Ideally ubuf points to the chain element which
+ * belongs to this frag, or, if frags were removed from
+ * the beginning, to an element shortly before it.
+ */
+ ubuf = xenvif_find_gref(skb, i, ubuf);
+
+ /* Try again from the beginning of the list, if we
+ * haven't tried from there. This only makes sense in
+ * the unlikely event of reordering the original frags.
+ * For injected local pages it's an unnecessary second
+ * run.
+ */
+ if (unlikely(!ubuf) && startpoint != head_ubuf)
+ ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+ if (likely(ubuf)) {
+ u16 pending_idx = ubuf->desc;
+
+ foreign_vif = ubuf_to_vif(ubuf);
+ foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+ /* Just a safety measure. If this was the last
+ * element on the list, the for loop will
+ * iterate again if a local page was added to
+ * the end. Using head_ubuf here prevents the
+ * second search on the chain. Or the original
+ * frags changed order, but that's less likely.
+ * Either way, ubuf shouldn't be NULL.
+ */
+ ubuf = ubuf->ctx ?
+ (struct ubuf_info *) ubuf->ctx :
+ head_ubuf;
+ } else
+ /* This frag was a local page, added to the
+ * array after the skb left netback.
+ */
+ ubuf = head_ubuf;
+ }
xenvif_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head,
foreign_vif,
- foreign_grefs[i]);
+ foreign_vif ? foreign_gref : UINT_MAX);
}
return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
notify_remote_via_irq(vif->rx_irq);
}
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
int more_to_do;
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
- xenvif_check_rx_xenvif(vif);
+ xenvif_napi_schedule_or_enable_events(vif);
}
static void xenvif_tx_err(struct xenvif *vif,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 6d4ee22708c9..32e969d95319 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop)
if (!found)
return -ENODEV;
+ /* At early boot, bail out and defer setup to of_init() */
+ if (!of_kset)
+ return found ? 0 : -ENODEV;
+
/* Update the sysfs attribute */
sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
__of_add_property_sysfs(np, newprop);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index d3d1cfd51e09..e384e2534594 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
return PCIBIOS_SUCCESSFUL;
}
+/*
+ * Remove windows, starting from the largest one down to the
+ * smallest one.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+ phys_addr_t base, size_t size)
+{
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+
+ mvebu_mbus_del_window(base, sz);
+ base += sz;
+ size -= sz;
+ }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e. the highest order bit set in the size).
+ */
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ size_t size_mapped = 0;
+
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+ int ret;
+
+ ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ sz, remap);
+ if (ret) {
+ dev_err(&port->pcie->pdev->dev,
+ "Could not create MBus window at 0x%x, size 0x%x: %d\n",
+ base, sz, ret);
+ mvebu_pcie_del_windows(port, base - size_mapped,
+ size_mapped);
+ return;
+ }
+
+ size -= sz;
+ size_mapped += sz;
+ base += sz;
+ if (remap != MVEBU_MBUS_NO_REMAP)
+ remap += sz;
+ }
+}
+
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
phys_addr_t iobase;
@@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
/* If a window was configured, remove it */
if (port->iowin_base) {
- mvebu_mbus_del_window(port->iowin_base,
- port->iowin_size);
+ mvebu_pcie_del_windows(port, port->iowin_base,
+ port->iowin_size);
port->iowin_base = 0;
port->iowin_size = 0;
}
@@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
port->iowin_base = port->pcie->io.start + iobase;
port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
(port->bridge.iolimitupper << 16)) -
- iobase);
+ iobase) + 1;
- mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
- port->iowin_base, port->iowin_size,
- iobase);
+ mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+ port->iowin_base, port->iowin_size,
+ iobase);
}
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
@@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
/* If a window was configured, remove it */
if (port->memwin_base) {
- mvebu_mbus_del_window(port->memwin_base,
- port->memwin_size);
+ mvebu_pcie_del_windows(port, port->memwin_base,
+ port->memwin_size);
port->memwin_base = 0;
port->memwin_size = 0;
}
@@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
port->memwin_size =
(((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
- port->memwin_base;
+ port->memwin_base + 1;
- mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
- port->memwin_base, port->memwin_size);
+ mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+ port->memwin_base, port->memwin_size,
+ MVEBU_MBUS_NO_REMAP);
}
/*
@@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
/*
* On the PCI-to-PCI bridge side, the I/O windows must have at
- * least a 64 KB size and be aligned on their size, and the
- * memory windows must have at least a 1 MB size and be
- * aligned on their size
+ * least a 64 KB size and the memory windows must have at
+ * least a 1 MB size. Moreover, MBus windows need to have a
+ * base address aligned on their size, and their size must be
+ * a power of two. This means that if the BAR doesn't have a
+ * power of two size, several MBus windows will actually be
+ * created. We need to ensure that the biggest MBus window
+ * (which will be the first one) is aligned on its size, which
+ * explains the rounddown_pow_of_two() being done here.
*/
if (res->flags & IORESOURCE_IO)
- return round_up(start, max_t(resource_size_t, SZ_64K, size));
+ return round_up(start, max_t(resource_size_t, SZ_64K,
+ rounddown_pow_of_two(size)));
else if (res->flags & IORESOURCE_MEM)
- return round_up(start, max_t(resource_size_t, SZ_1M, size));
+ return round_up(start, max_t(resource_size_t, SZ_1M,
+ rounddown_pow_of_two(size)));
else
return start;
}
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 58499277903a..6efc2ec5e4db 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot)
return WRONG_BUS_FREQUENCY;
}
- bsp = ctrl->pci_dev->bus->cur_bus_speed;
- msp = ctrl->pci_dev->bus->max_bus_speed;
+ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+ msp = ctrl->pci_dev->subordinate->max_bus_speed;
/* Check if there are other slots or devices on the same bus */
if (!list_empty(&ctrl->pci_dev->subordinate->devices))
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7325d43bf030..759475ef6ff3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3067,7 +3067,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return 1;
- return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND);
+ return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_TRPND);
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
@@ -3109,7 +3110,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
return 0;
/* Wait for Transaction Pending bit clean */
- if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP))
+ if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
goto clear;
dev_err(&dev->dev, "transaction is not cleared; "
diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
index deb7f4bcdb7b..438d4c72c7b3 100644
--- a/drivers/pnp/pnpbios/bioscalls.c
+++ b/drivers/pnp/pnpbios/bioscalls.c
@@ -37,7 +37,7 @@ __visible struct {
* kernel begins at offset 3GB...
*/
-asmlinkage void pnp_bios_callfunc(void);
+asmlinkage __visible void pnp_bios_callfunc(void);
__asm__(".text \n"
__ALIGN_STR "\n"
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index bd628a6f981d..e5f13c4310fe 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client,
if (IS_ERR(hym8563->rtc))
return PTR_ERR(hym8563->rtc);
+ /* the hym8563 alarm only supports one-minute accuracy */
+ hym8563->rtc->uie_unsupported = 1;
+
#ifdef CONFIG_COMMON_CLK
hym8563_clkout_register_clk(hym8563);
#endif
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 5c8f8226c848..4cdb64be061b 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -206,7 +206,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_hour = bcd2bin(regs[2] & 0x3f);
tm->tm_mday = bcd2bin(regs[3] & 0x3f);
tm->tm_wday = regs[4] & 0x7;
- tm->tm_mon = bcd2bin(regs[5] & 0x1f);
+ tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1;
tm->tm_year = bcd2bin(regs[6]) + 100;
return rtc_valid_tm(tm);
@@ -229,7 +229,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
regs[3] = bin2bcd(tm->tm_hour);
regs[4] = bin2bcd(tm->tm_mday);
regs[5] = tm->tm_wday;
- regs[6] = bin2bcd(tm->tm_mon);
+ regs[6] = bin2bcd(tm->tm_mon + 1);
regs[7] = bin2bcd(tm->tm_year - 100);
msg.addr = client->addr;
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index fc67f564f02c..788ed9b59b4e 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -1,10 +1,12 @@
#
# Makefile for the SuperH specific drivers.
#
-obj-y := intc/
+obj-$(CONFIG_SUPERH) += intc/
+obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/
+ifneq ($(CONFIG_COMMON_CLK),y)
+obj-$(CONFIG_HAVE_CLK) += clk/
+endif
+obj-$(CONFIG_MAPLE) += maple/
+obj-$(CONFIG_SUPERHYWAY) += superhyway/
-obj-$(CONFIG_HAVE_CLK) += clk/
-obj-$(CONFIG_MAPLE) += maple/
-obj-$(CONFIG_SUPERHYWAY) += superhyway/
-
-obj-y += pm_runtime.o
+obj-y += pm_runtime.o
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 8afa5a4589f2..10c65eb51f85 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
.con_ids = { NULL, },
};
+static bool default_pm_on;
+
static int __init sh_pm_runtime_init(void)
{
+ if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+ if (!of_machine_is_compatible("renesas,emev2") &&
+ !of_machine_is_compatible("renesas,r7s72100") &&
+ !of_machine_is_compatible("renesas,r8a73a4") &&
+ !of_machine_is_compatible("renesas,r8a7740") &&
+ !of_machine_is_compatible("renesas,r8a7778") &&
+ !of_machine_is_compatible("renesas,r8a7779") &&
+ !of_machine_is_compatible("renesas,r8a7790") &&
+ !of_machine_is_compatible("renesas,r8a7791") &&
+ !of_machine_is_compatible("renesas,sh7372") &&
+ !of_machine_is_compatible("renesas,sh73a0"))
+ return 0;
+ }
+
+ default_pm_on = true;
pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
@@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init);
static int __init sh_pm_runtime_late_init(void)
{
- pm_genpd_poweroff_unused();
+ if (default_pm_on)
+ pm_genpd_poweroff_unused();
return 0;
}
late_initcall(sh_pm_runtime_late_init);
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 713af4806f26..f6759dc0153b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
struct sg_table *sgt;
void *buf, *pbuf;
- /*
- * Some DMA controllers have problems transferring buffers that are
- * not multiple of 4 bytes. So we truncate the transfer so that it
- * is suitable for such controllers, and handle the trailing bytes
- * manually after the DMA completes.
- *
- * REVISIT: It would be better if this information could be
- * retrieved directly from the DMA device in a similar way than
- * ->copy_align etc. is done.
- */
- len = ALIGN(drv_data->len, 4);
-
if (dir == DMA_TO_DEVICE) {
dmadev = drv_data->tx_chan->device->dev;
sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
if (!error) {
pxa2xx_spi_unmap_dma_buffers(drv_data);
- /* Handle the last bytes of unaligned transfer */
drv_data->tx += drv_data->tx_map_len;
- drv_data->write(drv_data);
-
drv_data->rx += drv_data->rx_map_len;
- drv_data->read(drv_data);
msg->actual_length += drv_data->len;
msg->state = pxa2xx_spi_next_transfer(drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index b032e8885e24..78c66e3c53ed 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
int ret;
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4eb9bf02996c..939edf473235 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->master->set_cs(spi, !enable);
}
+#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
struct sg_table *sgt, void *buf, size_t len,
enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
}
}
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
struct device *tx_dev, *rx_dev;
struct spi_transfer *xfer;
- void *tmp;
- unsigned int max_tx, max_rx;
int ret;
- if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
- max_tx = 0;
- max_rx = 0;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if ((master->flags & SPI_MASTER_MUST_TX) &&
- !xfer->tx_buf)
- max_tx = max(xfer->len, max_tx);
- if ((master->flags & SPI_MASTER_MUST_RX) &&
- !xfer->rx_buf)
- max_rx = max(xfer->len, max_rx);
- }
-
- if (max_tx) {
- tmp = krealloc(master->dummy_tx, max_tx,
- GFP_KERNEL | GFP_DMA);
- if (!tmp)
- return -ENOMEM;
- master->dummy_tx = tmp;
- memset(tmp, 0, max_tx);
- }
-
- if (max_rx) {
- tmp = krealloc(master->dummy_rx, max_rx,
- GFP_KERNEL | GFP_DMA);
- if (!tmp)
- return -ENOMEM;
- master->dummy_rx = tmp;
- }
-
- if (max_tx || max_rx) {
- list_for_each_entry(xfer, &msg->transfers,
- transfer_list) {
- if (!xfer->tx_buf)
- xfer->tx_buf = master->dummy_tx;
- if (!xfer->rx_buf)
- xfer->rx_buf = master->dummy_rx;
- }
- }
- }
-
if (!master->can_dma)
return 0;
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
return 0;
}
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ return __spi_map_msg(master, msg);
+}
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
{
int ret;
- master->queued = true;
master->transfer = spi_queued_transfer;
if (!master->transfer_one_message)
master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
dev_err(&master->dev, "problem initializing queue\n");
goto err_init_queue;
}
+ master->queued = true;
ret = spi_start_queue(master);
if (ret) {
dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
return 0;
err_start_queue:
-err_init_queue:
spi_destroy_queue(master);
+err_init_queue:
return ret;
}
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
*/
int spi_setup(struct spi_device *spi)
{
- unsigned bad_bits;
+ unsigned bad_bits, ugly_bits;
int status = 0;
/* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
* that aren't supported with their current master
*/
bad_bits = spi->mode & ~spi->master->mode_bits;
+ ugly_bits = bad_bits &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+ if (ugly_bits) {
+ dev_warn(&spi->dev,
+ "setup: ignoring unsupported mode bits %x\n",
+ ugly_bits);
+ spi->mode &= ~ugly_bits;
+ bad_bits &= ~ugly_bits;
+ }
if (bad_bits) {
dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
bad_bits);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 4144a75e5f71..c270c9ae6d27 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node,
of_node_put(port);
if (port == imx_crtc->port) {
ret = of_graph_parse_endpoint(ep, &endpoint);
- return ret ? ret : endpoint.id;
+ return ret ? ret : endpoint.port;
}
} while (ep);
@@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev)
if (!remote || !of_device_is_available(remote)) {
of_node_put(remote);
continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(&pdev->dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
}
ret = imx_drm_add_component(&pdev->dev, remote);
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 575533f4fd64..a23f4f773146 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
tve->dev = dev;
spin_lock_init(&tve->lock);
- ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0);
+ ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
tve->ddc = of_find_i2c_adapter_by_node(ddc_node);
of_node_put(ddc_node);
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index 8c101cbbee97..acc8184c46cd 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq)
struct vpfe_fh *fh = vb2_get_drv_priv(vq);
struct vpfe_video_device *video = fh->video;
- if (!vb2_is_streaming(vq))
- return 0;
/* release all active buffers */
+ if (video->cur_frm == video->next_frm) {
+ vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (video->cur_frm != NULL)
+ vb2_buffer_done(&video->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (video->next_frm != NULL)
+ vb2_buffer_done(&video->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
while (!list_empty(&video->dma_queue)) {
video->next_frm = list_entry(video->dma_queue.next,
struct vpfe_cap_buffer, list);
diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h
index b3d2cc729657..4ba569258498 100644
--- a/drivers/staging/media/sn9c102/sn9c102_devtable.h
+++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h
@@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = {
{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
/* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
-#endif
{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
{ SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c
index 57eca7a45672..4fe751f7c2bf 100644
--- a/drivers/staging/rtl8723au/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c
@@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev)
#endif /* CONFIG_8723AU_P2P */
rtw_scan_abort23a(padapter);
- /* set this at the end */
- padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR;
RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n"));
DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
index c49160e477d8..07e542e5d156 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c
@@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr)
if (addr == RECV_BULK_IN_ADDR) {
pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
} else if (addr == RECV_INT_IN_ADDR) {
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+ pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]);
} else if (addr < HW_QUEUE_ENTRY) {
ep_num = pdvobj->Queue2Pipe[addr];
pipe = usb_sndbulkpipe(pusbd, ep_num);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 78cab13bbb1b..46588c85d39b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Initiator is expecting a NopIN ping reply..
*/
if (hdr->itt != RESERVED_ITT) {
- BUG_ON(!cmd);
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 6960f22909ae..302eb3b78715 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -775,6 +775,7 @@ struct iscsi_np {
int np_ip_proto;
int np_sock_type;
enum np_thread_state_table np_thread_state;
+ bool enabled;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8739b98f6f93..ca31fa1b8a4b 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2(
}
off = mrdsl % PAGE_SIZE;
if (!off)
- return 0;
+ goto check_prot;
if (mrdsl < PAGE_SIZE)
mrdsl = PAGE_SIZE;
@@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
return -1;
}
+ /*
+ * ISER currently requires that ImmediateData + Unsolicited
+ * Data be disabled when protection / signature MRs are enabled.
+ */
+check_prot:
+ if (sess->se_sess->sup_prot_ops &
+ (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
+ TARGET_PROT_DOUT_INSERT)) {
+
+ sprintf(buf, "ImmediateData=No");
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+
+ sprintf(buf, "InitialR2T=Yes");
+ if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
+ return -1;
+ }
+ pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
+ " T10-PI enabled ISER session\n");
+ }
}
return 0;
@@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket(
}
np->np_transport = t;
+ np->enabled = true;
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index eb96b20dc09e..ca1811858afd 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread(
return;
}
+ tpg_np->tpg_np->enabled = false;
iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 65001e133670..26416c15d65c 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("emulate_write_cache not supported for pSCSI\n");
return -EINVAL;
}
- if (dev->transport->get_write_cache) {
- pr_warn("emulate_write_cache cannot be changed when underlying"
- " HW reports WriteCacheEnabled, ignoring request\n");
- return 0;
+ if (flag &&
+ dev->transport->get_write_cache) {
+ pr_err("emulate_write_cache not supported for this device\n");
+ return -EINVAL;
}
dev->dev_attrib.emulate_write_cache = flag;
@@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
return 0;
}
if (!dev->transport->init_prot || !dev->transport->free_prot) {
+ /* 0 is only allowed value for non-supporting backends */
+ if (flag == 0)
+ return 0;
+
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name);
return -ENOSYS;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d4b98690a736..789aa9eb0a1e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1113,6 +1113,7 @@ void transport_init_se_cmd(
init_completion(&cmd->cmd_wait_comp);
init_completion(&cmd->task_stop_comp);
spin_lock_init(&cmd->t_state_lock);
+ kref_init(&cmd->cmd_kref);
cmd->transport_state = CMD_T_DEV_ACTIVE;
cmd->se_tfo = tfo;
@@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
unsigned long flags;
int ret = 0;
- kref_init(&se_cmd->cmd_kref);
/*
* Add a second kref if the fabric caller is expecting to handle
* fabric acknowledgement that requires two target_put_sess_cmd()
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 01cf37f212c3..f5fd515b2bee 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
struct fc_lport *lport;
- struct se_session *se_sess;
+ struct ft_sess *sess;
if (!cmd)
return;
- se_sess = cmd->sess->se_sess;
+ sess = cmd->sess;
fp = cmd->req_frame;
lport = fr_dev(fp);
if (fr_seq(fp))
lport->tt.seq_release(fr_seq(fp));
fc_frame_free(fp);
- percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
- ft_sess_put(cmd->sess); /* undo get from lookup at recv */
+ percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+ ft_sess_put(sess); /* undo get from lookup at recv */
}
void ft_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 96109a9972b6..84b4bfb84344 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
static unsigned event_array_pages __read_mostly;
+/*
+ * The pointers passed to sync_set_bit() and friends must be unsigned long
+ * aligned on non-x86 platforms.
+ */
+#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+
+#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
+#define EVTCHN_FIFO_BIT(b, w) \
+ (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
+
+#else
+
#define BM(w) ((unsigned long *)(w))
+#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
+
+#endif
static inline event_word_t *event_word_from_port(unsigned port)
{
@@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
static void evtchn_fifo_clear_pending(unsigned port)
{
event_word_t *word = event_word_from_port(port);
- sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word));
+ sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
static void evtchn_fifo_set_pending(unsigned port)
{
event_word_t *word = event_word_from_port(port);
- sync_set_bit(EVTCHN_FIFO_PENDING, BM(word));
+ sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
static bool evtchn_fifo_is_pending(unsigned port)
{
event_word_t *word = event_word_from_port(port);
- return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word));
+ return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
static bool evtchn_fifo_test_and_set_mask(unsigned port)
{
event_word_t *word = event_word_from_port(port);
- return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+ return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
static void evtchn_fifo_mask(unsigned port)
{
event_word_t *word = event_word_from_port(port);
- sync_set_bit(EVTCHN_FIFO_MASKED, BM(word));
+ sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
+static bool evtchn_fifo_is_masked(unsigned port)
+{
+ event_word_t *word = event_word_from_port(port);
+ return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
+}
/*
* Clear MASKED, spinning if BUSY is set.
*/
@@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port)
BUG_ON(!irqs_disabled());
clear_masked(word);
- if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) {
+ if (evtchn_fifo_is_pending(port)) {
struct evtchn_unmask unmask = { .port = port };
(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
}
@@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port)
static void consume_one_event(unsigned cpu,
struct evtchn_fifo_control_block *control_block,
- unsigned priority, uint32_t *ready)
+ unsigned priority, unsigned long *ready)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu,
* copy of the ready word.
*/
if (head == 0)
- clear_bit(priority, BM(ready));
+ clear_bit(priority, ready);
- if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))
- && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word)))
+ if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
handle_irq_for_port(port);
q->head[priority] = head;
@@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu,
static void evtchn_fifo_handle_events(unsigned cpu)
{
struct evtchn_fifo_control_block *control_block;
- uint32_t ready;
+ unsigned long ready;
unsigned q;
control_block = per_cpu(cpu_control_block, cpu);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 6d589f28bf9b..895ac7dc9dbf 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -340,8 +340,6 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
&blocksize,&sbi->s_prefix,
sbi->s_volume, &mount_flags)) {
printk(KERN_ERR "AFFS: Error parsing options\n");
- kfree(sbi->s_prefix);
- kfree(sbi);
return -EINVAL;
}
/* N.B. after this point s_prefix must be released */
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 2caf36ac3e93..cc87c1abac97 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
spin_lock(&active->d_lock);
/* Already gone? */
- if (!d_count(active))
+ if ((int) d_count(active) <= 0)
goto next;
qstr = &active->d_name;
@@ -230,7 +230,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
spin_lock(&expiring->d_lock);
- /* Bad luck, we've already been dentry_iput */
+ /* We've already been dentry_iput or unlinked */
if (!expiring->d_inode)
goto next;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 2ad7de94efef..2f6d7b13b5bd 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3120,6 +3120,8 @@ process_slot:
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
u64 skip = 0;
u64 trim = 0;
+ u64 aligned_end = 0;
+
if (off > key.offset) {
skip = off - key.offset;
new_key.offset += skip;
@@ -3136,9 +3138,11 @@ process_slot:
size -= skip + trim;
datal -= skip + trim;
+ aligned_end = ALIGN(new_key.offset + datal,
+ root->sectorsize);
ret = btrfs_drop_extents(trans, root, inode,
new_key.offset,
- new_key.offset + datal,
+ aligned_end,
1);
if (ret) {
if (ret != -EOPNOTSUPP)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index eb6537a08c1b..fd38b5053479 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1668,7 +1668,7 @@ static int get_first_ref(struct btrfs_root *root, u64 ino,
goto out;
}
- if (key.type == BTRFS_INODE_REF_KEY) {
+ if (found_key.type == BTRFS_INODE_REF_KEY) {
struct btrfs_inode_ref *iref;
iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index aadc2b68678b..a22d667f1069 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1737,6 +1737,9 @@ cifs_inode_needs_reval(struct inode *inode)
if (cifs_i->time == 0)
return true;
+ if (!cifs_sb->actimeo)
+ return true;
+
if (!time_in_range(jiffies, cifs_i->time,
cifs_i->time + cifs_sb->actimeo))
return true;
diff --git a/fs/dcache.c b/fs/dcache.c
index 40707d88a945..42ae01eefc07 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -246,16 +246,8 @@ static void __d_free(struct rcu_head *head)
kmem_cache_free(dentry_cache, dentry);
}
-/*
- * no locks, please.
- */
-static void d_free(struct dentry *dentry)
+static void dentry_free(struct dentry *dentry)
{
- BUG_ON((int)dentry->d_lockref.count > 0);
- this_cpu_dec(nr_dentry);
- if (dentry->d_op && dentry->d_op->d_release)
- dentry->d_op->d_release(dentry);
-
/* if dentry was never visible to RCU, immediate free is OK */
if (!(dentry->d_flags & DCACHE_RCUACCESS))
__d_free(&dentry->d_u.d_rcu);
@@ -403,56 +395,6 @@ static void dentry_lru_add(struct dentry *dentry)
d_lru_add(dentry);
}
-/*
- * Remove a dentry with references from the LRU.
- *
- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
- * lose our last reference through the parent walk. In this case, we need to
- * remove ourselves from the shrink list, not the LRU.
- */
-static void dentry_lru_del(struct dentry *dentry)
-{
- if (dentry->d_flags & DCACHE_LRU_LIST) {
- if (dentry->d_flags & DCACHE_SHRINK_LIST)
- return d_shrink_del(dentry);
- d_lru_del(dentry);
- }
-}
-
-/**
- * d_kill - kill dentry and return parent
- * @dentry: dentry to kill
- * @parent: parent dentry
- *
- * The dentry must already be unhashed and removed from the LRU.
- *
- * If this is the root of the dentry tree, return NULL.
- *
- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
- * d_kill.
- */
-static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
- __releases(dentry->d_lock)
- __releases(parent->d_lock)
- __releases(dentry->d_inode->i_lock)
-{
- list_del(&dentry->d_u.d_child);
- /*
- * Inform d_walk() that we are no longer attached to the
- * dentry tree
- */
- dentry->d_flags |= DCACHE_DENTRY_KILLED;
- if (parent)
- spin_unlock(&parent->d_lock);
- dentry_iput(dentry);
- /*
- * dentry_iput drops the locks, at which point nobody (except
- * transient RCU lookups) can reach this dentry.
- */
- d_free(dentry);
- return parent;
-}
-
/**
* d_drop - drop a dentry
* @dentry: dentry to drop
@@ -510,7 +452,14 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
__releases(dentry->d_lock)
{
struct inode *inode;
- struct dentry *parent;
+ struct dentry *parent = NULL;
+ bool can_free = true;
+
+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
+ can_free = dentry->d_flags & DCACHE_MAY_FREE;
+ spin_unlock(&dentry->d_lock);
+ goto out;
+ }
inode = dentry->d_inode;
if (inode && !spin_trylock(&inode->i_lock)) {
@@ -521,9 +470,7 @@ relock:
}
return dentry; /* try again with same dentry */
}
- if (IS_ROOT(dentry))
- parent = NULL;
- else
+ if (!IS_ROOT(dentry))
parent = dentry->d_parent;
if (parent && !spin_trylock(&parent->d_lock)) {
if (inode)
@@ -543,10 +490,40 @@ relock:
if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
dentry->d_op->d_prune(dentry);
- dentry_lru_del(dentry);
+ if (dentry->d_flags & DCACHE_LRU_LIST) {
+ if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
+ d_lru_del(dentry);
+ }
/* if it was on the hash then remove it */
__d_drop(dentry);
- return d_kill(dentry, parent);
+ list_del(&dentry->d_u.d_child);
+ /*
+ * Inform d_walk() that we are no longer attached to the
+ * dentry tree
+ */
+ dentry->d_flags |= DCACHE_DENTRY_KILLED;
+ if (parent)
+ spin_unlock(&parent->d_lock);
+ dentry_iput(dentry);
+ /*
+ * dentry_iput drops the locks, at which point nobody (except
+ * transient RCU lookups) can reach this dentry.
+ */
+ BUG_ON((int)dentry->d_lockref.count > 0);
+ this_cpu_dec(nr_dentry);
+ if (dentry->d_op && dentry->d_op->d_release)
+ dentry->d_op->d_release(dentry);
+
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+ dentry->d_flags |= DCACHE_MAY_FREE;
+ can_free = false;
+ }
+ spin_unlock(&dentry->d_lock);
+out:
+ if (likely(can_free))
+ dentry_free(dentry);
+ return parent;
}
/*
@@ -815,65 +792,13 @@ restart:
}
EXPORT_SYMBOL(d_prune_aliases);
-/*
- * Try to throw away a dentry - free the inode, dput the parent.
- * Requires dentry->d_lock is held, and dentry->d_count == 0.
- * Releases dentry->d_lock.
- *
- * This may fail if locks cannot be acquired no problem, just try again.
- */
-static struct dentry * try_prune_one_dentry(struct dentry *dentry)
- __releases(dentry->d_lock)
-{
- struct dentry *parent;
-
- parent = dentry_kill(dentry, 0);
- /*
- * If dentry_kill returns NULL, we have nothing more to do.
- * if it returns the same dentry, trylocks failed. In either
- * case, just loop again.
- *
- * Otherwise, we need to prune ancestors too. This is necessary
- * to prevent quadratic behavior of shrink_dcache_parent(), but
- * is also expected to be beneficial in reducing dentry cache
- * fragmentation.
- */
- if (!parent)
- return NULL;
- if (parent == dentry)
- return dentry;
-
- /* Prune ancestors. */
- dentry = parent;
- while (dentry) {
- if (lockref_put_or_lock(&dentry->d_lockref))
- return NULL;
- dentry = dentry_kill(dentry, 1);
- }
- return NULL;
-}
-
static void shrink_dentry_list(struct list_head *list)
{
- struct dentry *dentry;
+ struct dentry *dentry, *parent;
- rcu_read_lock();
- for (;;) {
- dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
- if (&dentry->d_lru == list)
- break; /* empty */
-
- /*
- * Get the dentry lock, and re-verify that the dentry is
- * this on the shrinking list. If it is, we know that
- * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
- */
+ while (!list_empty(list)) {
+ dentry = list_entry(list->prev, struct dentry, d_lru);
spin_lock(&dentry->d_lock);
- if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
- spin_unlock(&dentry->d_lock);
- continue;
- }
-
/*
* The dispose list is isolated and dentries are not accounted
* to the LRU here, so we can simply remove it from the list
@@ -885,30 +810,38 @@ static void shrink_dentry_list(struct list_head *list)
* We found an inuse dentry which was not removed from
* the LRU because of laziness during lookup. Do not free it.
*/
- if (dentry->d_lockref.count) {
+ if ((int)dentry->d_lockref.count > 0) {
spin_unlock(&dentry->d_lock);
continue;
}
- rcu_read_unlock();
+ parent = dentry_kill(dentry, 0);
/*
- * If 'try_to_prune()' returns a dentry, it will
- * be the same one we passed in, and d_lock will
- * have been held the whole time, so it will not
- * have been added to any other lists. We failed
- * to get the inode lock.
- *
- * We just add it back to the shrink list.
+ * If dentry_kill returns NULL, we have nothing more to do.
*/
- dentry = try_prune_one_dentry(dentry);
+ if (!parent)
+ continue;
- rcu_read_lock();
- if (dentry) {
+ if (unlikely(parent == dentry)) {
+ /*
+ * trylocks have failed and d_lock has been held the
+ * whole time, so it could not have been added to any
+ * other lists. Just add it back to the shrink list.
+ */
d_shrink_add(dentry, list);
spin_unlock(&dentry->d_lock);
+ continue;
}
+ /*
+ * We need to prune ancestors too. This is necessary to prevent
+ * quadratic behavior of shrink_dcache_parent(), but is also
+ * expected to be beneficial in reducing dentry cache
+ * fragmentation.
+ */
+ dentry = parent;
+ while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
+ dentry = dentry_kill(dentry, 1);
}
- rcu_read_unlock();
}
static enum lru_status
@@ -1261,34 +1194,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
if (data->start == dentry)
goto out;
- /*
- * move only zero ref count dentries to the dispose list.
- *
- * Those which are presently on the shrink list, being processed
- * by shrink_dentry_list(), shouldn't be moved. Otherwise the
- * loop in shrink_dcache_parent() might not make any progress
- * and loop forever.
- */
- if (dentry->d_lockref.count) {
- dentry_lru_del(dentry);
- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
- /*
- * We can't use d_lru_shrink_move() because we
- * need to get the global LRU lock and do the
- * LRU accounting.
- */
- d_lru_del(dentry);
- d_shrink_add(dentry, &data->dispose);
+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
data->found++;
- ret = D_WALK_NORETRY;
+ } else {
+ if (dentry->d_flags & DCACHE_LRU_LIST)
+ d_lru_del(dentry);
+ if (!dentry->d_lockref.count) {
+ d_shrink_add(dentry, &data->dispose);
+ data->found++;
+ }
}
/*
* We can return to the caller if we have found some (this
* ensures forward progress). We'll be coming back to find
* the rest.
*/
- if (data->found && need_resched())
- ret = D_WALK_QUIT;
+ if (!list_empty(&data->dispose))
+ ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
return ret;
}
@@ -1318,45 +1240,35 @@ void shrink_dcache_parent(struct dentry *parent)
}
EXPORT_SYMBOL(shrink_dcache_parent);
-static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
+static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
- struct select_data *data = _data;
- enum d_walk_ret ret = D_WALK_CONTINUE;
+ /* it has busy descendants; complain about those instead */
+ if (!list_empty(&dentry->d_subdirs))
+ return D_WALK_CONTINUE;
- if (dentry->d_lockref.count) {
- dentry_lru_del(dentry);
- if (likely(!list_empty(&dentry->d_subdirs)))
- goto out;
- if (dentry == data->start && dentry->d_lockref.count == 1)
- goto out;
- printk(KERN_ERR
- "BUG: Dentry %p{i=%lx,n=%s}"
- " still in use (%d)"
- " [unmount of %s %s]\n",
+ /* root with refcount 1 is fine */
+ if (dentry == _data && dentry->d_lockref.count == 1)
+ return D_WALK_CONTINUE;
+
+ printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
+ " still in use (%d) [unmount of %s %s]\n",
dentry,
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
- dentry->d_name.name,
+ dentry,
dentry->d_lockref.count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
- BUG();
- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
- /*
- * We can't use d_lru_shrink_move() because we
- * need to get the global LRU lock and do the
- * LRU accounting.
- */
- if (dentry->d_flags & DCACHE_LRU_LIST)
- d_lru_del(dentry);
- d_shrink_add(dentry, &data->dispose);
- data->found++;
- ret = D_WALK_NORETRY;
- }
-out:
- if (data->found && need_resched())
- ret = D_WALK_QUIT;
- return ret;
+ WARN_ON(1);
+ return D_WALK_CONTINUE;
+}
+
+static void do_one_tree(struct dentry *dentry)
+{
+ shrink_dcache_parent(dentry);
+ d_walk(dentry, dentry, umount_check, NULL);
+ d_drop(dentry);
+ dput(dentry);
}
/*
@@ -1366,40 +1278,15 @@ void shrink_dcache_for_umount(struct super_block *sb)
{
struct dentry *dentry;
- if (down_read_trylock(&sb->s_umount))
- BUG();
+ WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
dentry = sb->s_root;
sb->s_root = NULL;
- for (;;) {
- struct select_data data;
-
- INIT_LIST_HEAD(&data.dispose);
- data.start = dentry;
- data.found = 0;
-
- d_walk(dentry, &data, umount_collect, NULL);
- if (!data.found)
- break;
-
- shrink_dentry_list(&data.dispose);
- cond_resched();
- }
- d_drop(dentry);
- dput(dentry);
+ do_one_tree(dentry);
while (!hlist_bl_empty(&sb->s_anon)) {
- struct select_data data;
- dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
-
- INIT_LIST_HEAD(&data.dispose);
- data.start = NULL;
- data.found = 0;
-
- d_walk(dentry, &data, umount_collect, NULL);
- if (data.found)
- shrink_dentry_list(&data.dispose);
- cond_resched();
+ dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
+ do_one_tree(dentry);
}
}
@@ -1647,8 +1534,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
unsigned add_flags = d_flags_for_inode(inode);
spin_lock(&dentry->d_lock);
- dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
- dentry->d_flags |= add_flags;
+ __d_set_type(dentry, add_flags);
if (inode)
hlist_add_head(&dentry->d_alias, &inode->i_dentry);
dentry->d_inode = inode;
diff --git a/fs/exec.c b/fs/exec.c
index 476f3ebf437e..238b7aa26f68 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -657,10 +657,10 @@ int setup_arg_pages(struct linux_binprm *bprm,
unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
- /* Limit stack size to 1GB */
+ /* Limit stack size */
stack_base = rlimit_max(RLIMIT_STACK);
- if (stack_base > (1 << 30))
- stack_base = 1 << 30;
+ if (stack_base > STACK_SIZE_MAX)
+ stack_base = STACK_SIZE_MAX;
/* Make sure we didn't let the argument array grow too large. */
if (vma->vm_end - vma->vm_start > stack_base)
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index a0b0855d00a9..205e0d5d5307 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -348,7 +348,7 @@ int __init fuse_ctl_init(void)
return register_filesystem(&fuse_ctl_fs_type);
}
-void fuse_ctl_cleanup(void)
+void __exit fuse_ctl_cleanup(void)
{
unregister_filesystem(&fuse_ctl_fs_type);
}
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 5b4e035b364c..42198359fa1b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -679,6 +679,14 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
return create_new_entry(fc, req, dir, entry, S_IFLNK);
}
+static inline void fuse_update_ctime(struct inode *inode)
+{
+ if (!IS_NOCMTIME(inode)) {
+ inode->i_ctime = current_fs_time(inode->i_sb);
+ mark_inode_dirty_sync(inode);
+ }
+}
+
static int fuse_unlink(struct inode *dir, struct dentry *entry)
{
int err;
@@ -713,6 +721,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
fuse_invalidate_attr(inode);
fuse_invalidate_attr(dir);
fuse_invalidate_entry_cache(entry);
+ fuse_update_ctime(inode);
} else if (err == -EINTR)
fuse_invalidate_entry(entry);
return err;
@@ -743,23 +752,26 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
return err;
}
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
- struct inode *newdir, struct dentry *newent)
+static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
+ struct inode *newdir, struct dentry *newent,
+ unsigned int flags, int opcode, size_t argsize)
{
int err;
- struct fuse_rename_in inarg;
+ struct fuse_rename2_in inarg;
struct fuse_conn *fc = get_fuse_conn(olddir);
- struct fuse_req *req = fuse_get_req_nopages(fc);
+ struct fuse_req *req;
+ req = fuse_get_req_nopages(fc);
if (IS_ERR(req))
return PTR_ERR(req);
- memset(&inarg, 0, sizeof(inarg));
+ memset(&inarg, 0, argsize);
inarg.newdir = get_node_id(newdir);
- req->in.h.opcode = FUSE_RENAME;
+ inarg.flags = flags;
+ req->in.h.opcode = opcode;
req->in.h.nodeid = get_node_id(olddir);
req->in.numargs = 3;
- req->in.args[0].size = sizeof(inarg);
+ req->in.args[0].size = argsize;
req->in.args[0].value = &inarg;
req->in.args[1].size = oldent->d_name.len + 1;
req->in.args[1].value = oldent->d_name.name;
@@ -771,15 +783,22 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
if (!err) {
/* ctime changes */
fuse_invalidate_attr(oldent->d_inode);
+ fuse_update_ctime(oldent->d_inode);
+
+ if (flags & RENAME_EXCHANGE) {
+ fuse_invalidate_attr(newent->d_inode);
+ fuse_update_ctime(newent->d_inode);
+ }
fuse_invalidate_attr(olddir);
if (olddir != newdir)
fuse_invalidate_attr(newdir);
/* newent will end up negative */
- if (newent->d_inode) {
+ if (!(flags & RENAME_EXCHANGE) && newent->d_inode) {
fuse_invalidate_attr(newent->d_inode);
fuse_invalidate_entry_cache(newent);
+ fuse_update_ctime(newent->d_inode);
}
} else if (err == -EINTR) {
/* If request was interrupted, DEITY only knows if the
@@ -795,6 +814,36 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
return err;
}
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+ struct inode *newdir, struct dentry *newent)
+{
+ return fuse_rename_common(olddir, oldent, newdir, newent, 0,
+ FUSE_RENAME, sizeof(struct fuse_rename_in));
+}
+
+static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
+ struct inode *newdir, struct dentry *newent,
+ unsigned int flags)
+{
+ struct fuse_conn *fc = get_fuse_conn(olddir);
+ int err;
+
+ if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
+ return -EINVAL;
+
+ if (fc->no_rename2 || fc->minor < 23)
+ return -EINVAL;
+
+ err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+ FUSE_RENAME2, sizeof(struct fuse_rename2_in));
+ if (err == -ENOSYS) {
+ fc->no_rename2 = 1;
+ err = -EINVAL;
+ }
+ return err;
+
+}
+
static int fuse_link(struct dentry *entry, struct inode *newdir,
struct dentry *newent)
{
@@ -829,6 +878,7 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
inc_nlink(inode);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
+ fuse_update_ctime(inode);
} else if (err == -EINTR) {
fuse_invalidate_attr(inode);
}
@@ -846,6 +896,8 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
attr->size = i_size_read(inode);
attr->mtime = inode->i_mtime.tv_sec;
attr->mtimensec = inode->i_mtime.tv_nsec;
+ attr->ctime = inode->i_ctime.tv_sec;
+ attr->ctimensec = inode->i_ctime.tv_nsec;
}
stat->dev = inode->i_sb->s_dev;
@@ -1504,7 +1556,7 @@ static bool update_mtime(unsigned ivalid, bool trust_local_mtime)
}
static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg,
- bool trust_local_mtime)
+ bool trust_local_cmtime)
{
unsigned ivalid = iattr->ia_valid;
@@ -1523,13 +1575,18 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg,
if (!(ivalid & ATTR_ATIME_SET))
arg->valid |= FATTR_ATIME_NOW;
}
- if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_mtime)) {
+ if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
arg->valid |= FATTR_MTIME;
arg->mtime = iattr->ia_mtime.tv_sec;
arg->mtimensec = iattr->ia_mtime.tv_nsec;
- if (!(ivalid & ATTR_MTIME_SET) && !trust_local_mtime)
+ if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
arg->valid |= FATTR_MTIME_NOW;
}
+ if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
+ arg->valid |= FATTR_CTIME;
+ arg->ctime = iattr->ia_ctime.tv_sec;
+ arg->ctimensec = iattr->ia_ctime.tv_nsec;
+ }
}
/*
@@ -1597,39 +1654,38 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_req *req,
/*
* Flush inode->i_mtime to the server
*/
-int fuse_flush_mtime(struct file *file, bool nofail)
+int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
{
- struct inode *inode = file->f_mapping->host;
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = get_fuse_conn(inode);
- struct fuse_req *req = NULL;
+ struct fuse_req *req;
struct fuse_setattr_in inarg;
struct fuse_attr_out outarg;
int err;
- if (nofail) {
- req = fuse_get_req_nofail_nopages(fc, file);
- } else {
- req = fuse_get_req_nopages(fc);
- if (IS_ERR(req))
- return PTR_ERR(req);
- }
+ req = fuse_get_req_nopages(fc);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
- inarg.valid |= FATTR_MTIME;
+ inarg.valid = FATTR_MTIME;
inarg.mtime = inode->i_mtime.tv_sec;
inarg.mtimensec = inode->i_mtime.tv_nsec;
-
+ if (fc->minor >= 23) {
+ inarg.valid |= FATTR_CTIME;
+ inarg.ctime = inode->i_ctime.tv_sec;
+ inarg.ctimensec = inode->i_ctime.tv_nsec;
+ }
+ if (ff) {
+ inarg.valid |= FATTR_FH;
+ inarg.fh = ff->fh;
+ }
fuse_setattr_fill(fc, req, inode, &inarg, &outarg);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
- if (!err)
- clear_bit(FUSE_I_MTIME_DIRTY, &fi->state);
-
return err;
}
@@ -1653,7 +1709,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
bool is_wb = fc->writeback_cache;
loff_t oldsize;
int err;
- bool trust_local_mtime = is_wb && S_ISREG(inode->i_mode);
+ bool trust_local_cmtime = is_wb && S_ISREG(inode->i_mode);
if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
attr->ia_valid |= ATTR_FORCE;
@@ -1678,11 +1734,13 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
+ if (trust_local_cmtime && attr->ia_size != inode->i_size)
+ attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
}
memset(&inarg, 0, sizeof(inarg));
memset(&outarg, 0, sizeof(outarg));
- iattr_to_fattr(attr, &inarg, trust_local_mtime);
+ iattr_to_fattr(attr, &inarg, trust_local_cmtime);
if (file) {
struct fuse_file *ff = file->private_data;
inarg.valid |= FATTR_FH;
@@ -1711,9 +1769,12 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
spin_lock(&fc->lock);
/* the kernel maintains i_mtime locally */
- if (trust_local_mtime && (attr->ia_valid & ATTR_MTIME)) {
- inode->i_mtime = attr->ia_mtime;
- clear_bit(FUSE_I_MTIME_DIRTY, &fi->state);
+ if (trust_local_cmtime) {
+ if (attr->ia_valid & ATTR_MTIME)
+ inode->i_mtime = attr->ia_mtime;
+ if (attr->ia_valid & ATTR_CTIME)
+ inode->i_ctime = attr->ia_ctime;
+ /* FIXME: clear I_DIRTY_SYNC? */
}
fuse_change_attributes_common(inode, &outarg.attr,
@@ -1810,8 +1871,10 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
fc->no_setxattr = 1;
err = -EOPNOTSUPP;
}
- if (!err)
+ if (!err) {
fuse_invalidate_attr(inode);
+ fuse_update_ctime(inode);
+ }
return err;
}
@@ -1941,20 +2004,11 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
fc->no_removexattr = 1;
err = -EOPNOTSUPP;
}
- if (!err)
+ if (!err) {
fuse_invalidate_attr(inode);
- return err;
-}
-
-static int fuse_update_time(struct inode *inode, struct timespec *now,
- int flags)
-{
- if (flags & S_MTIME) {
- inode->i_mtime = *now;
- set_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state);
- BUG_ON(!S_ISREG(inode->i_mode));
+ fuse_update_ctime(inode);
}
- return 0;
+ return err;
}
static const struct inode_operations fuse_dir_inode_operations = {
@@ -1964,6 +2018,7 @@ static const struct inode_operations fuse_dir_inode_operations = {
.unlink = fuse_unlink,
.rmdir = fuse_rmdir,
.rename = fuse_rename,
+ .rename2 = fuse_rename2,
.link = fuse_link,
.setattr = fuse_setattr,
.create = fuse_create,
@@ -1996,7 +2051,6 @@ static const struct inode_operations fuse_common_inode_operations = {
.getxattr = fuse_getxattr,
.listxattr = fuse_listxattr,
.removexattr = fuse_removexattr,
- .update_time = fuse_update_time,
};
static const struct inode_operations fuse_symlink_inode_operations = {
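The new .rename2 hook above only succeeds when the userspace filesystem implements the FUSE_RENAME2 opcode (protocol minor 23 or later); otherwise the kernel returns -EINVAL and remembers the failure in fc->no_rename2. A minimal userspace sketch of driving those flags through renameat2(2) follows; the raw syscall is used because glibc did not wrap it at the time, and SYS_renameat2 plus the RENAME_* constants are assumed to come from sufficiently new kernel headers:

  #include <fcntl.h>            /* AT_FDCWD */
  #include <linux/fs.h>         /* RENAME_NOREPLACE, RENAME_EXCHANGE */
  #include <stdio.h>
  #include <sys/syscall.h>      /* SYS_renameat2, new headers only */
  #include <unistd.h>

  /* glibc had no wrapper yet, so issue the raw syscall */
  static int my_renameat2(int olddir, const char *oldpath,
                          int newdir, const char *newpath, unsigned int flags)
  {
          return syscall(SYS_renameat2, olddir, oldpath, newdir, newpath, flags);
  }

  int main(void)
  {
          /* fails with EEXIST instead of silently replacing "b" */
          if (my_renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_NOREPLACE))
                  perror("renameat2");
          return 0;
  }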
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 13f8bdec5110..96d513e01a5d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -223,6 +223,8 @@ void fuse_finish_open(struct inode *inode, struct file *file)
i_size_write(inode, 0);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
+ if (fc->writeback_cache)
+ file_update_time(file);
}
if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
fuse_link_write_file(file);
@@ -232,18 +234,26 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
+ bool lock_inode = (file->f_flags & O_TRUNC) &&
+ fc->atomic_o_trunc &&
+ fc->writeback_cache;
err = generic_file_open(inode, file);
if (err)
return err;
+ if (lock_inode)
+ mutex_lock(&inode->i_mutex);
+
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
- if (err)
- return err;
- fuse_finish_open(inode, file);
+ if (!err)
+ fuse_finish_open(inode, file);
- return 0;
+ if (lock_inode)
+ mutex_unlock(&inode->i_mutex);
+
+ return err;
}
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
@@ -314,10 +324,7 @@ static int fuse_release(struct inode *inode, struct file *file)
/* see fuse_vma_close() for !writeback_cache case */
if (fc->writeback_cache)
- filemap_write_and_wait(file->f_mapping);
-
- if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state))
- fuse_flush_mtime(file, true);
+ write_inode_now(inode, 1);
fuse_release_common(file, FUSE_RELEASE);
@@ -439,7 +446,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (fc->no_flush)
return 0;
- err = filemap_write_and_wait(file->f_mapping);
+ err = write_inode_now(inode, 1);
if (err)
return err;
@@ -480,13 +487,6 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
if (is_bad_inode(inode))
return -EIO;
- err = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (err)
- return err;
-
- if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
- return 0;
-
mutex_lock(&inode->i_mutex);
/*
@@ -494,17 +494,17 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
* wait for all outstanding writes, before sending the FSYNC
* request.
*/
- err = write_inode_now(inode, 0);
+ err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (err)
goto out;
fuse_sync_writes(inode);
+ err = sync_inode_metadata(inode, 1);
+ if (err)
+ goto out;
- if (test_bit(FUSE_I_MTIME_DIRTY, &get_fuse_inode(inode)->state)) {
- int err = fuse_flush_mtime(file, false);
- if (err)
- goto out;
- }
+ if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
+ goto out;
req = fuse_get_req_nopages(fc);
if (IS_ERR(req)) {
@@ -1659,13 +1659,13 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
fuse_writepage_free(fc, req);
}
-static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
- struct fuse_inode *fi)
+static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
+ struct fuse_inode *fi)
{
struct fuse_file *ff = NULL;
spin_lock(&fc->lock);
- if (!WARN_ON(list_empty(&fi->write_files))) {
+ if (!list_empty(&fi->write_files)) {
ff = list_entry(fi->write_files.next, struct fuse_file,
write_entry);
fuse_file_get(ff);
@@ -1675,6 +1675,29 @@ static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
return ff;
}
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+ struct fuse_inode *fi)
+{
+ struct fuse_file *ff = __fuse_write_file_get(fc, fi);
+ WARN_ON(!ff);
+ return ff;
+}
+
+int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_file *ff;
+ int err;
+
+ ff = __fuse_write_file_get(fc, fi);
+ err = fuse_flush_times(inode, ff);
+ if (ff)
+ fuse_file_put(ff, 0);
+
+ return err;
+}
+
static int fuse_writepage_locked(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -2972,6 +2995,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
(mode & FALLOC_FL_PUNCH_HOLE);
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ return -EOPNOTSUPP;
+
if (fc->no_fallocate)
return -EOPNOTSUPP;
@@ -3017,12 +3043,8 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE)) {
bool changed = fuse_write_update_size(inode, offset + length);
- if (changed && fc->writeback_cache) {
- struct fuse_inode *fi = get_fuse_inode(inode);
-
- inode->i_mtime = current_fs_time(inode->i_sb);
- set_bit(FUSE_I_MTIME_DIRTY, &fi->state);
- }
+ if (changed && fc->writeback_cache)
+ file_update_time(file);
}
if (mode & FALLOC_FL_PUNCH_HOLE)
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index a257ed8ebee6..7aa5c75e0de1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -119,8 +119,6 @@ enum {
FUSE_I_INIT_RDPLUS,
/** An operation changing file size is in progress */
FUSE_I_SIZE_UNSTABLE,
- /** i_mtime has been updated locally; a flush to userspace needed */
- FUSE_I_MTIME_DIRTY,
};
struct fuse_conn;
@@ -544,6 +542,9 @@ struct fuse_conn {
/** Is fallocate not implemented by fs? */
unsigned no_fallocate:1;
+ /** Is rename with flags implemented by fs? */
+ unsigned no_rename2:1;
+
/** Use enhanced/automatic page cache invalidation. */
unsigned auto_inval_data:1;
@@ -725,7 +726,7 @@ int fuse_dev_init(void);
void fuse_dev_cleanup(void);
int fuse_ctl_init(void);
-void fuse_ctl_cleanup(void);
+void __exit fuse_ctl_cleanup(void);
/**
* Allocate a request
@@ -891,7 +892,8 @@ int fuse_dev_release(struct inode *inode, struct file *file);
bool fuse_write_update_size(struct inode *inode, loff_t pos);
-int fuse_flush_mtime(struct file *file, bool nofail);
+int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
+int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
int fuse_do_setattr(struct inode *inode, struct iattr *attr,
struct file *file);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8d611696fcad..754dcf23de8a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -175,9 +175,9 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
if (!fc->writeback_cache || !S_ISREG(inode->i_mode)) {
inode->i_mtime.tv_sec = attr->mtime;
inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode->i_ctime.tv_sec = attr->ctime;
+ inode->i_ctime.tv_nsec = attr->ctimensec;
}
- inode->i_ctime.tv_sec = attr->ctime;
- inode->i_ctime.tv_nsec = attr->ctimensec;
if (attr->blksize != 0)
inode->i_blkbits = ilog2(attr->blksize);
@@ -256,6 +256,8 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
inode->i_size = attr->size;
inode->i_mtime.tv_sec = attr->mtime;
inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode->i_ctime.tv_sec = attr->ctime;
+ inode->i_ctime.tv_nsec = attr->ctimensec;
if (S_ISREG(inode->i_mode)) {
fuse_init_common(inode);
fuse_init_file_inode(inode);
@@ -303,7 +305,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
if ((inode->i_state & I_NEW)) {
inode->i_flags |= S_NOATIME;
- if (!fc->writeback_cache || !S_ISREG(inode->i_mode))
+ if (!fc->writeback_cache || !S_ISREG(attr->mode))
inode->i_flags |= S_NOCMTIME;
inode->i_generation = generation;
inode->i_data.backing_dev_info = &fc->bdi;
@@ -788,6 +790,7 @@ static const struct super_operations fuse_super_operations = {
.alloc_inode = fuse_alloc_inode,
.destroy_inode = fuse_destroy_inode,
.evict_inode = fuse_evict_inode,
+ .write_inode = fuse_write_inode,
.drop_inode = generic_delete_inode,
.remount_fs = fuse_remount_fs,
.put_super = fuse_put_super,
@@ -890,6 +893,11 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->async_dio = 1;
if (arg->flags & FUSE_WRITEBACK_CACHE)
fc->writeback_cache = 1;
+ if (arg->time_gran && arg->time_gran <= 1000000000)
+ fc->sb->s_time_gran = arg->time_gran;
+ else
+ fc->sb->s_time_gran = 1000000000;
+
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -996,7 +1004,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_flags & MS_MANDLOCK)
goto err;
- sb->s_flags &= ~MS_NOSEC;
+ sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
if (!parse_fuse_opt((char *) data, &d, is_bdev))
goto err;
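With the hunk above, a FUSE server can advertise its timestamp granularity in the INIT reply; the kernel clamps it to at most one second and stores it in sb->s_time_gran, which the VFS then uses to truncate the m/ctime it maintains locally. Roughly what that truncation amounts to, much like the kernel's timespec_trunc() helper (the function name here is illustrative only):

  #include <linux/time.h>
  #include <linux/types.h>

  /* illustrative only: truncate a timestamp to a granularity given in
   * nanoseconds, which is what sb->s_time_gran implies for the m/ctime
   * the kernel now maintains locally for writeback_cache inodes */
  static struct timespec trunc_to_gran(struct timespec ts, u32 gran)
  {
          if (gran == NSEC_PER_SEC)
                  ts.tv_nsec = 0;                 /* one-second granularity */
          else if (gran > 1)
                  ts.tv_nsec -= ts.tv_nsec % gran;
          return ts;
  }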
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 204027520937..e19d4c0cacae 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1030,6 +1030,11 @@ static int __init init_hugetlbfs_fs(void)
int error;
int i;
+ if (!hugepages_supported()) {
+ pr_info("hugetlbfs: disabling because there are no supported hugepage sizes\n");
+ return -ENOTSUPP;
+ }
+
error = bdi_init(&hugetlbfs_backing_dev_info);
if (error)
return error;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e01ea4a14a01..5e9a80cfc3d8 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -610,6 +610,7 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
+ struct kernfs_root *root = kernfs_root(kn);
const struct kernfs_ops *ops;
struct kernfs_open_file *of;
bool has_read, has_write, has_mmap;
@@ -624,14 +625,16 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
has_write = ops->write || ops->mmap;
has_mmap = ops->mmap;
- /* check perms and supported operations */
- if ((file->f_mode & FMODE_WRITE) &&
- (!(inode->i_mode & S_IWUGO) || !has_write))
- goto err_out;
+ /* see the flag definition for details */
+ if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
+ if ((file->f_mode & FMODE_WRITE) &&
+ (!(inode->i_mode & S_IWUGO) || !has_write))
+ goto err_out;
- if ((file->f_mode & FMODE_READ) &&
- (!(inode->i_mode & S_IRUGO) || !has_read))
- goto err_out;
+ if ((file->f_mode & FMODE_READ) &&
+ (!(inode->i_mode & S_IRUGO) || !has_read))
+ goto err_out;
+ }
/* allocate a kernfs_open_file for the file */
error = -ENOMEM;
diff --git a/fs/locks.c b/fs/locks.c
index e663aeac579e..e390bd9ae068 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -389,18 +389,6 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
- /* Ensure that fl->fl_filp has compatible f_mode */
- switch (l->l_type) {
- case F_RDLCK:
- if (!(filp->f_mode & FMODE_READ))
- return -EBADF;
- break;
- case F_WRLCK:
- if (!(filp->f_mode & FMODE_WRITE))
- return -EBADF;
- break;
- }
-
return assign_type(fl, l->l_type);
}
@@ -2034,6 +2022,22 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
return error;
}
+/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */
+static int
+check_fmode_for_setlk(struct file_lock *fl)
+{
+ switch (fl->fl_type) {
+ case F_RDLCK:
+ if (!(fl->fl_file->f_mode & FMODE_READ))
+ return -EBADF;
+ break;
+ case F_WRLCK:
+ if (!(fl->fl_file->f_mode & FMODE_WRITE))
+ return -EBADF;
+ }
+ return 0;
+}
+
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
@@ -2071,6 +2075,10 @@ again:
if (error)
goto out;
+ error = check_fmode_for_setlk(file_lock);
+ if (error)
+ goto out;
+
/*
* If the cmd is requesting file-private locks, then set the
* FL_OFDLCK flag and override the owner.
@@ -2206,6 +2214,10 @@ again:
if (error)
goto out;
+ error = check_fmode_for_setlk(file_lock);
+ if (error)
+ goto out;
+
/*
* If the cmd is requesting file-private locks, then set the
* FL_OFDLCK flag and override the owner.
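The net effect of moving the f_mode check out of flock64_to_posix_lock() and into check_fmode_for_setlk() is that only F_SETLK/F_SETLKW enforce it, while F_GETLK can still query a lock type the descriptor itself could not take. A small userspace sketch of that behaviour (the file path is just an example of something readable):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          struct flock fl;
          int fd = open("/etc/passwd", O_RDONLY);

          if (fd < 0)
                  return 1;

          memset(&fl, 0, sizeof(fl));
          fl.l_type = F_WRLCK;            /* a lock type this fd can't take */
          fl.l_whence = SEEK_SET;         /* l_start/l_len 0 = whole file */

          if (fcntl(fd, F_GETLK, &fl) == -1)      /* querying is still allowed */
                  perror("F_GETLK");

          fl.l_type = F_WRLCK;                    /* F_GETLK may have rewritten it */
          if (fcntl(fd, F_SETLK, &fl) == -1)      /* rejected with EBADF */
                  perror("F_SETLK");

          close(fd);
          return 0;
  }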
diff --git a/fs/namei.c b/fs/namei.c
index c6157c894fce..80168273396b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1542,7 +1542,7 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
inode = path->dentry->d_inode;
}
err = -ENOENT;
- if (!inode)
+ if (!inode || d_is_negative(path->dentry))
goto out_path_put;
if (should_follow_link(path->dentry, follow)) {
@@ -2249,7 +2249,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
mutex_unlock(&dir->d_inode->i_mutex);
done:
- if (!dentry->d_inode) {
+ if (!dentry->d_inode || d_is_negative(dentry)) {
error = -ENOENT;
dput(dentry);
goto out;
@@ -2994,7 +2994,7 @@ retry_lookup:
finish_lookup:
/* we _can_ be in RCU mode here */
error = -ENOENT;
- if (d_is_negative(path->dentry)) {
+ if (!inode || d_is_negative(path->dentry)) {
path_to_nameidata(path, nd);
goto out;
}
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 6f3f392d48af..b6f46013dddf 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -402,8 +402,10 @@ sort_pacl(struct posix_acl *pacl)
* by uid/gid. */
int i, j;
- if (pacl->a_count <= 4)
- return; /* no users or groups */
+ /* no users or groups */
+ if (!pacl || pacl->a_count <= 4)
+ return;
+
i = 1;
while (pacl->a_entries[i].e_tag == ACL_USER)
i++;
@@ -530,13 +532,12 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
/*
* ACLs with no ACEs are treated differently in the inheritable
- * and effective cases: when there are no inheritable ACEs, we
- * set a zero-length default posix acl:
+ * and effective cases: when there are no inheritable ACEs, we
+ * return NULL so that ->set_acl is called with a NULL ACL structure.
*/
- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) {
- pacl = posix_acl_alloc(0, GFP_KERNEL);
- return pacl ? pacl : ERR_PTR(-ENOMEM);
- }
+ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+ return NULL;
+
/*
* When there are no effective ACEs, the following will end
* up setting a 3-element effective posix ACL with all
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3ba65979a3cd..32b699bebb9c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1078,6 +1078,18 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
return NULL;
}
clp->cl_name.len = name.len;
+ INIT_LIST_HEAD(&clp->cl_sessions);
+ idr_init(&clp->cl_stateids);
+ atomic_set(&clp->cl_refcount, 0);
+ clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+ INIT_LIST_HEAD(&clp->cl_idhash);
+ INIT_LIST_HEAD(&clp->cl_openowners);
+ INIT_LIST_HEAD(&clp->cl_delegations);
+ INIT_LIST_HEAD(&clp->cl_lru);
+ INIT_LIST_HEAD(&clp->cl_callbacks);
+ INIT_LIST_HEAD(&clp->cl_revoked);
+ spin_lock_init(&clp->cl_lock);
+ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
}
@@ -1095,6 +1107,7 @@ free_client(struct nfs4_client *clp)
WARN_ON_ONCE(atomic_read(&ses->se_ref));
free_session(ses);
}
+ rpc_destroy_wait_queue(&clp->cl_cb_waitq);
free_svc_cred(&clp->cl_cred);
kfree(clp->cl_name.data);
idr_destroy(&clp->cl_stateids);
@@ -1347,7 +1360,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
if (clp == NULL)
return NULL;
- INIT_LIST_HEAD(&clp->cl_sessions);
ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
if (ret) {
spin_lock(&nn->client_lock);
@@ -1355,20 +1367,9 @@ static struct nfs4_client *create_client(struct xdr_netobj name,
spin_unlock(&nn->client_lock);
return NULL;
}
- idr_init(&clp->cl_stateids);
- atomic_set(&clp->cl_refcount, 0);
- clp->cl_cb_state = NFSD4_CB_UNKNOWN;
- INIT_LIST_HEAD(&clp->cl_idhash);
- INIT_LIST_HEAD(&clp->cl_openowners);
- INIT_LIST_HEAD(&clp->cl_delegations);
- INIT_LIST_HEAD(&clp->cl_lru);
- INIT_LIST_HEAD(&clp->cl_callbacks);
- INIT_LIST_HEAD(&clp->cl_revoked);
- spin_lock_init(&clp->cl_lock);
nfsd4_init_callback(&clp->cl_cb_null);
clp->cl_time = get_seconds();
clear_bit(0, &clp->cl_cb_slot_busy);
- rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
copy_verf(clp, verf);
rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
gen_confirm(clp);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 4e565c814309..732648b270dc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -698,6 +698,8 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
}
group->overflow_event = &oevent->fse;
+ if (force_o_largefile())
+ event_f_flags |= O_LARGEFILE;
group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
spin_lock_init(&group->fanotify_data.access_lock);
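With force_o_largefile() applied to event_f_flags, the descriptors delivered in fanotify events get O_LARGEFILE in the same way open(2) would force it, so events for files larger than 2 GiB no longer fail with EOVERFLOW when the listener did not pass the flag itself. A minimal sketch of creating such a group (marking and the event read loop are omitted; fanotify_init requires CAP_SYS_ADMIN):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/fanotify.h>

  int main(void)
  {
          /* O_LARGEFILE no longer has to be spelled out here; the kernel
           * forces it into event_f_flags the same way open(2) would */
          int fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

          if (fd < 0)
                  perror("fanotify_init");
          return fd < 0;
  }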
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index af3f7aa73e13..ee1f88419cb0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -472,11 +472,15 @@ bail:
void dlm_destroy_master_caches(void)
{
- if (dlm_lockname_cache)
+ if (dlm_lockname_cache) {
kmem_cache_destroy(dlm_lockname_cache);
+ dlm_lockname_cache = NULL;
+ }
- if (dlm_lockres_cache)
+ if (dlm_lockres_cache) {
kmem_cache_destroy(dlm_lockres_cache);
+ dlm_lockres_cache = NULL;
+ }
}
static void dlm_lockres_release(struct kref *kref)
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 9e363e41dacc..0855f772cd41 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -246,6 +246,12 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
umode_t mode = 0;
int not_equiv = 0;
+ /*
+ * A null ACL can always be presented as mode bits.
+ */
+ if (!acl)
+ return 0;
+
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch (pa->e_tag) {
case ACL_USER_OBJ:
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 28cc1acd5439..e9ef59b3abb1 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -47,12 +47,13 @@ static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
ssize_t count;
char *buf;
- /* acquire buffer and ensure that it's >= PAGE_SIZE */
+ /* acquire buffer and ensure that it's >= PAGE_SIZE and clear */
count = seq_get_buf(sf, &buf);
if (count < PAGE_SIZE) {
seq_commit(sf, -1);
return 0;
}
+ memset(buf, 0, PAGE_SIZE);
/*
* Invoke show(). Control may reach here via seq file lseek even
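The memset restores the guarantee that the page handed to a sysfs show() callback starts out zeroed, which had been the behaviour before the seq_file conversion and which some callbacks and readers relied on. A conventional show() implementation, with hypothetical names, for reference:

  #include <linux/device.h>
  #include <linux/kernel.h>

  /* hypothetical attribute: a conventional show() fills at most one page
   * and returns the byte count; with the memset above, the rest of the
   * page handed to it is guaranteed to be zero rather than stale data */
  static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
  {
          return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
  }
  static DEVICE_ATTR_RO(foo);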
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index a66ad6196f59..8794423f7efb 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -63,7 +63,8 @@ int __init sysfs_init(void)
{
int err;
- sysfs_root = kernfs_create_root(NULL, 0, NULL);
+ sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
+ NULL);
if (IS_ERR(sysfs_root))
return PTR_ERR(sysfs_root);
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 01b6a0102fbd..abda1124a70f 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -213,7 +213,7 @@ xfs_attr_calc_size(
* Out of line attribute, cannot double split, but
* make room for the attribute value itself.
*/
- uint dblocks = XFS_B_TO_FSB(mp, valuelen);
+ uint dblocks = xfs_attr3_rmt_blocks(mp, valuelen);
nblks += dblocks;
nblks += XFS_NEXTENTADD_SPACE_RES(mp, dblocks, XFS_ATTR_FORK);
}
@@ -698,11 +698,22 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
trace_xfs_attr_leaf_replace(args);
+ /* save the attribute state for later removal*/
args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
args->rmtblkcnt2 = args->rmtblkcnt;
+ args->rmtvaluelen2 = args->rmtvaluelen;
+
+ /*
+ * clear the remote attr state now that it is saved so that the
+ * values reflect the state of the attribute we are about to
+ * add, not the attribute we just found and will remove later.
+ */
+ args->rmtblkno = 0;
+ args->rmtblkcnt = 0;
+ args->rmtvaluelen = 0;
}
/*
@@ -794,6 +805,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
args->blkno = args->blkno2;
args->rmtblkno = args->rmtblkno2;
args->rmtblkcnt = args->rmtblkcnt2;
+ args->rmtvaluelen = args->rmtvaluelen2;
if (args->rmtblkno) {
error = xfs_attr_rmtval_remove(args);
if (error)
@@ -999,13 +1011,22 @@ restart:
trace_xfs_attr_node_replace(args);
+ /* save the attribute state for later removal*/
args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
args->rmtblkcnt2 = args->rmtblkcnt;
+ args->rmtvaluelen2 = args->rmtvaluelen;
+
+ /*
+ * clear the remote attr state now that it is saved so that the
+ * values reflect the state of the attribute we are about to
+ * add, not the attribute we just found and will remove later.
+ */
args->rmtblkno = 0;
args->rmtblkcnt = 0;
+ args->rmtvaluelen = 0;
}
retval = xfs_attr3_leaf_add(blk->bp, state->args);
@@ -1133,6 +1154,7 @@ restart:
args->blkno = args->blkno2;
args->rmtblkno = args->rmtblkno2;
args->rmtblkcnt = args->rmtblkcnt2;
+ args->rmtvaluelen = args->rmtvaluelen2;
if (args->rmtblkno) {
error = xfs_attr_rmtval_remove(args);
if (error)
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index fe9587fab17a..511c283459b1 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -1229,6 +1229,7 @@ xfs_attr3_leaf_add_work(
name_rmt->valueblk = 0;
args->rmtblkno = 1;
args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+ args->rmtvaluelen = args->valuelen;
}
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
@@ -2167,11 +2168,11 @@ xfs_attr3_leaf_lookup_int(
if (!xfs_attr_namesp_match(args->flags, entry->flags))
continue;
args->index = probe;
- args->valuelen = be32_to_cpu(name_rmt->valuelen);
+ args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
args->rmtblkcnt = xfs_attr3_rmt_blocks(
args->dp->i_mount,
- args->valuelen);
+ args->rmtvaluelen);
return XFS_ERROR(EEXIST);
}
}
@@ -2220,19 +2221,19 @@ xfs_attr3_leaf_getvalue(
name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
ASSERT(name_rmt->namelen == args->namelen);
ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
- valuelen = be32_to_cpu(name_rmt->valuelen);
+ args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
- valuelen);
+ args->rmtvaluelen);
if (args->flags & ATTR_KERNOVAL) {
- args->valuelen = valuelen;
+ args->valuelen = args->rmtvaluelen;
return 0;
}
- if (args->valuelen < valuelen) {
- args->valuelen = valuelen;
+ if (args->valuelen < args->rmtvaluelen) {
+ args->valuelen = args->rmtvaluelen;
return XFS_ERROR(ERANGE);
}
- args->valuelen = valuelen;
+ args->valuelen = args->rmtvaluelen;
}
return 0;
}
@@ -2519,7 +2520,7 @@ xfs_attr3_leaf_clearflag(
ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
- name_rmt->valuelen = cpu_to_be32(args->valuelen);
+ name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
@@ -2677,7 +2678,7 @@ xfs_attr3_leaf_flipflags(
ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
- name_rmt->valuelen = cpu_to_be32(args->valuelen);
+ name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
}
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 01db96f60cf0..833fe5d98d80 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -447,6 +447,7 @@ xfs_attr3_leaf_list_int(
args.dp = context->dp;
args.whichfork = XFS_ATTR_FORK;
args.valuelen = valuelen;
+ args.rmtvaluelen = valuelen;
args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
args.rmtblkcnt = xfs_attr3_rmt_blocks(
diff --git a/fs/xfs/xfs_attr_remote.c b/fs/xfs/xfs_attr_remote.c
index 6e37823e2932..d2e6e948cec7 100644
--- a/fs/xfs/xfs_attr_remote.c
+++ b/fs/xfs/xfs_attr_remote.c
@@ -337,7 +337,7 @@ xfs_attr_rmtval_get(
struct xfs_buf *bp;
xfs_dablk_t lblkno = args->rmtblkno;
__uint8_t *dst = args->value;
- int valuelen = args->valuelen;
+ int valuelen;
int nmap;
int error;
int blkcnt = args->rmtblkcnt;
@@ -347,7 +347,9 @@ xfs_attr_rmtval_get(
trace_xfs_attr_rmtval_get(args);
ASSERT(!(args->flags & ATTR_KERNOVAL));
+ ASSERT(args->rmtvaluelen == args->valuelen);
+ valuelen = args->rmtvaluelen;
while (valuelen > 0) {
nmap = ATTR_RMTVALUE_MAPSIZE;
error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
@@ -415,7 +417,7 @@ xfs_attr_rmtval_set(
* attributes have headers, we can't just do a straight byte to FSB
* conversion and have to take the header space into account.
*/
- blkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
+ blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
XFS_ATTR_FORK);
if (error)
@@ -480,7 +482,7 @@ xfs_attr_rmtval_set(
*/
lblkno = args->rmtblkno;
blkcnt = args->rmtblkcnt;
- valuelen = args->valuelen;
+ valuelen = args->rmtvaluelen;
while (valuelen > 0) {
struct xfs_buf *bp;
xfs_daddr_t dblkno;
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 6e95ea79f5d7..201c6091d26a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -60,10 +60,12 @@ typedef struct xfs_da_args {
int index; /* index of attr of interest in blk */
xfs_dablk_t rmtblkno; /* remote attr value starting blkno */
int rmtblkcnt; /* remote attr value block count */
+ int rmtvaluelen; /* remote attr value length in bytes */
xfs_dablk_t blkno2; /* blkno of 2nd attr leaf of interest */
int index2; /* index of 2nd attr in blk */
xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */
int rmtblkcnt2; /* remote attr value block count */
+ int rmtvaluelen2; /* remote attr value length in bytes */
int op_flags; /* operation flags */
enum xfs_dacmp cmpresult; /* name compare result for lookups */
} xfs_da_args_t;
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 1399e187d425..753e467aa1a5 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -237,7 +237,7 @@ xfs_fs_nfs_commit_metadata(
if (!lsn)
return 0;
- return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+ return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
const struct export_operations xfs_export_operations = {
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 951a2321ee01..830c1c937b88 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -155,7 +155,7 @@ xfs_dir_fsync(
if (!lsn)
return 0;
- return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
+ return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
STATIC int
@@ -295,7 +295,7 @@ xfs_file_aio_read(
xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
if (inode->i_mapping->nrpages) {
- ret = -filemap_write_and_wait_range(
+ ret = filemap_write_and_wait_range(
VFS_I(ip)->i_mapping,
pos, -1);
if (ret) {
@@ -837,7 +837,7 @@ xfs_file_fallocate(
unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
if (offset & blksize_mask || len & blksize_mask) {
- error = -EINVAL;
+ error = EINVAL;
goto out_unlock;
}
@@ -846,7 +846,7 @@ xfs_file_fallocate(
* in which case it is effectively a truncate operation
*/
if (offset + len >= i_size_read(inode)) {
- error = -EINVAL;
+ error = EINVAL;
goto out_unlock;
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ef1ca010f417..36d630319a27 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -72,8 +72,8 @@ xfs_initxattrs(
int error = 0;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- error = xfs_attr_set(ip, xattr->name, xattr->value,
- xattr->value_len, ATTR_SECURE);
+ error = -xfs_attr_set(ip, xattr->name, xattr->value,
+ xattr->value_len, ATTR_SECURE);
if (error < 0)
break;
}
@@ -93,8 +93,8 @@ xfs_init_security(
struct inode *dir,
const struct qstr *qstr)
{
- return security_inode_init_security(inode, dir, qstr,
- &xfs_initxattrs, NULL);
+ return -security_inode_init_security(inode, dir, qstr,
+ &xfs_initxattrs, NULL);
}
static void
@@ -124,15 +124,15 @@ xfs_cleanup_inode(
xfs_dentry_to_name(&teardown, dentry, 0);
xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
- iput(inode);
}
STATIC int
-xfs_vn_mknod(
+xfs_generic_create(
struct inode *dir,
struct dentry *dentry,
umode_t mode,
- dev_t rdev)
+ dev_t rdev,
+ bool tmpfile) /* unnamed file */
{
struct inode *inode;
struct xfs_inode *ip = NULL;
@@ -156,8 +156,12 @@ xfs_vn_mknod(
if (error)
return error;
- xfs_dentry_to_name(&name, dentry, mode);
- error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+ if (!tmpfile) {
+ xfs_dentry_to_name(&name, dentry, mode);
+ error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+ } else {
+ error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+ }
if (unlikely(error))
goto out_free_acl;
@@ -169,18 +173,22 @@ xfs_vn_mknod(
#ifdef CONFIG_XFS_POSIX_ACL
if (default_acl) {
- error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
if (error)
goto out_cleanup_inode;
}
if (acl) {
- error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS);
if (error)
goto out_cleanup_inode;
}
#endif
- d_instantiate(dentry, inode);
+ if (tmpfile)
+ d_tmpfile(dentry, inode);
+ else
+ d_instantiate(dentry, inode);
+
out_free_acl:
if (default_acl)
posix_acl_release(default_acl);
@@ -189,11 +197,23 @@ xfs_vn_mknod(
return -error;
out_cleanup_inode:
- xfs_cleanup_inode(dir, inode, dentry);
+ if (!tmpfile)
+ xfs_cleanup_inode(dir, inode, dentry);
+ iput(inode);
goto out_free_acl;
}
STATIC int
+xfs_vn_mknod(
+ struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode,
+ dev_t rdev)
+{
+ return xfs_generic_create(dir, dentry, mode, rdev, false);
+}
+
+STATIC int
xfs_vn_create(
struct inode *dir,
struct dentry *dentry,
@@ -353,6 +373,7 @@ xfs_vn_symlink(
out_cleanup_inode:
xfs_cleanup_inode(dir, inode, dentry);
+ iput(inode);
out:
return -error;
}
@@ -1053,25 +1074,7 @@ xfs_vn_tmpfile(
struct dentry *dentry,
umode_t mode)
{
- int error;
- struct xfs_inode *ip;
- struct inode *inode;
-
- error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
- if (unlikely(error))
- return -error;
-
- inode = VFS_I(ip);
-
- error = xfs_init_security(inode, dir, &dentry->d_name);
- if (unlikely(error)) {
- iput(inode);
- return -error;
- }
-
- d_tmpfile(dentry, inode);
-
- return 0;
+ return xfs_generic_create(dir, dentry, mode, 0, true);
}
static const struct inode_operations xfs_inode_operations = {
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 08624dc67317..a5f8bd9899d3 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -616,11 +616,13 @@ xfs_log_mount(
int error = 0;
int min_logfsbs;
- if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
- xfs_notice(mp, "Mounting Filesystem");
- else {
+ if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
+ xfs_notice(mp, "Mounting V%d Filesystem",
+ XFS_SB_VERSION_NUM(&mp->m_sb));
+ } else {
xfs_notice(mp,
-"Mounting filesystem in no-recovery mode. Filesystem will be inconsistent.");
+"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
+ XFS_SB_VERSION_NUM(&mp->m_sb));
ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
}
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 993cb19e7d39..944f3d9456a8 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -743,8 +743,6 @@ xfs_mountfs(
new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
mp->m_inode_cluster_size = new_size;
- xfs_info(mp, "Using inode cluster size of %d bytes",
- mp->m_inode_cluster_size);
}
/*
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 348e4d2ed6e6..dc977b6e6a36 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -843,22 +843,17 @@ xfs_qm_init_quotainfo(
qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
- if ((error = list_lru_init(&qinf->qi_lru))) {
- kmem_free(qinf);
- mp->m_quotainfo = NULL;
- return error;
- }
+ error = -list_lru_init(&qinf->qi_lru);
+ if (error)
+ goto out_free_qinf;
/*
* See if quotainodes are setup, and if not, allocate them,
* and change the superblock accordingly.
*/
- if ((error = xfs_qm_init_quotainos(mp))) {
- list_lru_destroy(&qinf->qi_lru);
- kmem_free(qinf);
- mp->m_quotainfo = NULL;
- return error;
- }
+ error = xfs_qm_init_quotainos(mp);
+ if (error)
+ goto out_free_lru;
INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
@@ -918,7 +913,7 @@ xfs_qm_init_quotainfo(
qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
-
+
xfs_qm_dqdestroy(dqp);
} else {
qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -935,6 +930,13 @@ xfs_qm_init_quotainfo(
qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
register_shrinker(&qinf->qi_shrinker);
return 0;
+
+out_free_lru:
+ list_lru_destroy(&qinf->qi_lru);
+out_free_qinf:
+ kmem_free(qinf);
+ mp->m_quotainfo = NULL;
+ return error;
}
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index 0c0e41bbe4e3..8baf61afae1d 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -201,10 +201,6 @@ xfs_mount_validate_sb(
* write validation, we don't need to check feature masks.
*/
if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
- xfs_alert(mp,
-"Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
-"Use of these features in this kernel is at your own risk!");
-
if (xfs_sb_has_compat_feature(sbp,
XFS_SB_FEAT_COMPAT_UNKNOWN)) {
xfs_warn(mp,
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 205376776377..3494eff8e4eb 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1433,11 +1433,11 @@ xfs_fs_fill_super(
if (error)
goto out_free_fsname;
- error = xfs_init_mount_workqueues(mp);
+ error = -xfs_init_mount_workqueues(mp);
if (error)
goto out_close_devices;
- error = xfs_icsb_init_counters(mp);
+ error = -xfs_icsb_init_counters(mp);
if (error)
goto out_destroy_workqueues;
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index b4ea8f50fc65..5e752b959054 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -12,7 +12,7 @@
[RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
- [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \
+ [RLIMIT_STACK] = { _STK_LIM, RLIM_INFINITY }, \
[RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
[RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_NPROC] = { 0, 0 }, \
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 49376aec2fbb..6dfd64b3a604 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -637,6 +637,22 @@
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 940ece4934ba..012d58fa8ff0 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -191,8 +191,8 @@
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
+ INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
diff --git a/include/dt-bindings/clk/at91.h b/include/dt-bindings/clock/at91.h
index 0b4cb999a3f7..0b4cb999a3f7 100644
--- a/include/dt-bindings/clk/at91.h
+++ b/include/dt-bindings/clock/at91.h
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c2515851c1aa..d60904b9e505 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -473,6 +473,7 @@ struct cftype {
};
extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
{
@@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
return task_css_check(task, subsys_id, false);
}
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+ return task_css_check(task, subsys_id, true) ==
+ init_css_set.subsys[subsys_id];
+}
+
static inline struct cgroup *task_cgroup(struct task_struct *task,
int subsys_id)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 3b9bfdb83ba6..3c7ec327ebd2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -221,6 +221,8 @@ struct dentry_operations {
#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */
#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
+#define DCACHE_MAY_FREE 0x00800000
+
extern seqlock_t rename_lock;
static inline int dname_external(const struct dentry *dentry)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5b337cf8fb86..b65166de1d9d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -412,6 +412,16 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
return &mm->page_table_lock;
}
+static inline bool hugepages_supported(void)
+{
+ /*
+ * Some platforms decide whether they support huge pages at boot
+ * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+ * there is no such support.
+ */
+ return HPAGE_SHIFT != 0;
+}
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 7c8b20b120ea..a9a53b12397b 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -56,6 +56,7 @@ struct macvlan_dev {
int numqueues;
netdev_features_t tap_features;
int minor;
+ int nest_level;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8c0fb7f3a9a5..4967916fe4ac 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
{
return dev->priv_flags & IFF_802_1Q_VLAN;
}
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ unsigned int nest_level;
};
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -197,6 +198,12 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
const struct net_device *by_dev);
extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+ BUG_ON(!is_vlan_dev(dev));
+ return vlan_dev_priv(dev)->nest_level;
+}
#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -263,6 +270,11 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
{
return false;
}
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+ BUG();
+ return 0;
+}
#endif
static inline bool vlan_hw_offload_capable(netdev_features_t features,
@@ -483,4 +495,5 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
*/
skb->protocol = htons(ETH_P_802_2);
}
+
#endif /* !(_LINUX_IF_VLAN_H_) */
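vlan_get_encap_level() and the nest_level fields added here exist so that stacked devices can report their depth, letting netif_addr_lock_nested() (see the netdevice.h hunk below) pick a distinct lockdep subclass per level instead of the fixed SINGLE_DEPTH_NESTING. A hypothetical sketch of wiring a driver up to the new ndo_get_lock_subclass hook via dev_get_nest_level():

  #include <linux/if_vlan.h>
  #include <linux/netdevice.h>

  /* hypothetical driver glue: report this device's stacking depth so
   * netif_addr_lock_nested() gets a distinct lockdep subclass per level */
  static int foo_get_lock_subclass(struct net_device *dev)
  {
          return dev_get_nest_level(dev, is_vlan_dev);
  }

  static const struct net_device_ops foo_netdev_ops = {
          /* ... other ndo hooks ... */
          .ndo_get_lock_subclass  = foo_get_lock_subclass,
  };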
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 97ac926c78a7..051c85032f48 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -272,6 +272,11 @@ static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
return -EINVAL;
}
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return 0;
+}
+
static inline int irq_can_set_affinity(unsigned int irq)
{
return 0;
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index b0122dc6f96a..ca1be5c9136c 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -50,7 +50,24 @@ enum kernfs_node_flag {
/* @flags for kernfs_create_root() */
enum kernfs_root_flag {
- KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
+ /*
+ * kernfs_nodes are created in the deactivated state and invisible.
+ * They require explicit kernfs_activate() to become visible. This
+ * can be used to make related nodes become visible atomically
+ * after all nodes are created successfully.
+ */
+ KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
+
+ /*
+ * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
+ * succeeds regardless of the RW permissions. sysfs had an extra
+ * layer of enforcement where open(2) fails with -EACCES regardless
+ * of CAP_DAC_OVERRIDE if the permission doesn't have the
+ * respective read or write access at all (none of S_IRUGO or
+ * S_IWUGO) or the respective operation isn't implemented. The
+ * following flag enables that behavior.
+ */
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
};
/* type-specific structures for kernfs_node union members */
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 34a513a2727b..a6a42dd02466 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -12,9 +12,9 @@
#endif
#ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C" __visible
+#define CPP_ASMLINKAGE extern "C"
#else
-#define CPP_ASMLINKAGE __visible
+#define CPP_ASMLINKAGE
#endif
#ifndef asmlinkage
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h
index 7c36cc55d2c7..443176ee1ab0 100644
--- a/include/linux/mfd/rtsx_common.h
+++ b/include/linux/mfd/rtsx_common.h
@@ -45,7 +45,6 @@ struct platform_device;
struct rtsx_slot {
struct platform_device *p_dev;
void (*card_event)(struct platform_device *p_dev);
- void (*done_transfer)(struct platform_device *p_dev);
};
#endif
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 8d6bbd609ad9..a3835976f7c6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -943,12 +943,6 @@ void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read, int timeout);
-int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read);
-int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read);
-int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int sg_count, bool read);
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index b66e7610d4ee..7040dc98ff8b 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -421,6 +421,17 @@ struct mlx4_wqe_inline_seg {
__be32 byte_count;
};
+enum mlx4_update_qp_attr {
+ MLX4_UPDATE_QP_SMAC = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+ u8 smac_index;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ enum mlx4_update_qp_attr attr,
+ struct mlx4_update_qp_params *params);
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf9811e1321a..d6777060449f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -370,6 +370,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
+extern void kvfree(const void *addr);
+
static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/include/linux/net.h b/include/linux/net.h
index 94734a6259a4..17d83393afcc 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -248,24 +248,17 @@ do { \
bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *done_key);
-#ifdef HAVE_JUMP_LABEL
-#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
- { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
-#else /* !HAVE_JUMP_LABEL */
-#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#endif /* HAVE_JUMP_LABEL */
-
#define net_get_random_once(buf, nbytes) \
({ \
bool ___ret = false; \
static bool ___done = false; \
- static struct static_key ___done_key = \
- ___NET_RANDOM_STATIC_KEY_INIT; \
- if (!static_key_true(&___done_key)) \
+ static struct static_key ___once_key = \
+ STATIC_KEY_INIT_TRUE; \
+ if (static_key_true(&___once_key)) \
___ret = __net_get_random_once(buf, \
nbytes, \
&___done, \
- &___done_key); \
+ &___once_key); \
___ret; \
})
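The rework keys net_get_random_once() off a static key that starts out true and is switched off once the buffer has been filled, so steady-state callers go through a branch the jump-label machinery can patch out, without the HAVE_JUMP_LABEL special-casing removed above. Typical usage, with hypothetical names:

  #include <linux/jhash.h>
  #include <linux/net.h>

  /* hypothetical caller: fill a per-boot secret exactly once; after the
   * first successful fill the static key is switched off and later calls
   * reduce to a patched-out branch */
  static u32 foo_hash_secret __read_mostly;

  static u32 foo_hash(u32 val)
  {
          net_get_random_once(&foo_hash_secret, sizeof(foo_hash_secret));
          return jhash_1word(val, foo_hash_secret);
  }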
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9ec3a945caf2..2db1610bf109 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1143,6 +1143,7 @@ struct net_device_ops {
netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
struct net_device *dev,
void *priv);
+ int (*ndo_get_lock_subclass)(struct net_device *dev);
};
/**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
static inline void netif_addr_lock_nested(struct net_device *dev)
{
- spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+ int subclass = SINGLE_DEPTH_NESTING;
+
+ if (dev->netdev_ops->ndo_get_lock_subclass)
+ subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+ spin_lock_nested(&dev->addr_list_lock, subclass);
}
static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3050,10 +3056,19 @@ extern int weight_p;
extern int bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+ for (iter = &(dev)->adj_list.upper, \
+ updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+ updev; \
+ updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
+/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
for (iter = &(dev)->all_adj_list.upper, \
updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
@@ -3077,6 +3092,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
priv; \
priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+void *netdev_lower_get_next(struct net_device *dev,
+ struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+ for (iter = &(dev)->adj_list.lower, \
+ ldev = netdev_lower_get_next(dev, &(iter)); \
+ ldev; \
+ ldev = netdev_lower_get_next(dev, &(iter)))
+
void *netdev_adjacent_get_private(struct list_head *adj_list);
void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3115,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+ bool (*type_check)(struct net_device *dev));
int skb_checksum_help(struct sk_buff *skb);
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path);
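
A hedged sketch of the new immediate-upper-device iterator declared above; the walk must run under the RCU read lock, and the printing helper is purely illustrative.

	#include <linux/netdevice.h>
	#include <linux/rcupdate.h>

	static void example_walk_uppers(struct net_device *dev)
	{
		struct net_device *upper;
		struct list_head *iter;

		rcu_read_lock();
		netdev_for_each_upper_dev_rcu(dev, upper, iter)
			pr_info("%s is stacked on %s\n", upper->name, dev->name);
		rcu_read_unlock();
	}
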
diff --git a/include/linux/of.h b/include/linux/of.h
index 3bad8d106e0e..e6f0988c1c68 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -349,7 +349,7 @@ int of_device_is_stdout_path(struct device_node *dn);
#else /* CONFIG_OF */
-static inline const char* of_node_full_name(struct device_node *np)
+static inline const char* of_node_full_name(const struct device_node *np)
{
return "<no-node>";
}
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3356abcfff18..3ef6ea12806a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -402,6 +402,8 @@ struct perf_event {
struct ring_buffer *rb;
struct list_head rb_entry;
+ unsigned long rcu_batches;
+ int rcu_pending;
/* poll related */
wait_queue_head_t waitq;
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 8e3e66ac0a52..953937ea5233 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -4,6 +4,7 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/wait.h>
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
@@ -22,6 +23,10 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
#else
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25f54c79f757..221b2bde3723 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -220,7 +220,7 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#define TASK_PARKED 512
#define TASK_STATE_MAX 1024
-#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
*
* @dl_boosted tells if we are boosted due to DI. If so we are
* outside bandwidth enforcement mechanism (but only until we
- * exit the critical section).
+ * exit the critical section);
+ *
+ * @dl_yielded tells if task gave up the cpu before consuming
+ * all its available runtime during the last job.
*/
- int dl_throttled, dl_new, dl_boosted;
+ int dl_throttled, dl_new, dl_boosted, dl_yielded;
/*
* Bandwidth enforcement timer. Each -deadline task has its
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f2f7398848cf..d82abd40a3c0 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -101,4 +101,13 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
+#ifdef CONFIG_SYSFS
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_remove(struct kmem_cache *);
+#else
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+}
+#endif
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 38e41e4d0998..1d09b46c1e48 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -127,6 +127,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg);
void rt6_ifdown(struct net *net, struct net_device *dev);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
/*
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index ed0b2c599a64..7c5cbfe3fc49 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -80,7 +80,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
TP_fast_assign(
__entry->ip = ip;
- __entry->refcnt = __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
+ __entry->refcnt = __this_cpu_read(mod->refptr->incs) - __this_cpu_read(mod->refptr->decs);
__assign_str(name, mod->name);
),
diff --git a/include/uapi/asm-generic/resource.h b/include/uapi/asm-generic/resource.h
index f863428796d5..c6d10af50123 100644
--- a/include/uapi/asm-generic/resource.h
+++ b/include/uapi/asm-generic/resource.h
@@ -57,12 +57,5 @@
# define RLIM_INFINITY (~0UL)
#endif
-/*
- * RLIMIT_STACK default maximum - some architectures override it:
- */
-#ifndef _STK_LIM_MAX
-# define _STK_LIM_MAX RLIM_INFINITY
-#endif
-
#endif /* _UAPI_ASM_GENERIC_RESOURCE_H */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 6db66783d268..333640608087 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -697,9 +697,11 @@ __SYSCALL(__NR_finit_module, sys_finit_module)
__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
#define __NR_sched_getattr 275
__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 276
+__SYSCALL(__NR_renameat2, sys_renameat2)
#undef __NR_syscalls
-#define __NR_syscalls 276
+#define __NR_syscalls 277
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index dfa4c860ccef..b21ea454bd33 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -331,9 +331,17 @@ enum {
#define AUDIT_FAIL_PRINTK 1
#define AUDIT_FAIL_PANIC 2
+/*
+ * These bits disambiguate different calling conventions that share an
+ * ELF machine type, bitness, and endianness
+ */
+#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
+#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
+
/* distinguish syscall tables */
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE 0x40000000
+
#define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARMEB (EM_ARM)
@@ -346,7 +354,11 @@ enum {
#define AUDIT_ARCH_MIPS (EM_MIPS)
#define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\
+ __AUDIT_ARCH_CONVENTION_MIPS64_N32)
#define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
+ __AUDIT_ARCH_CONVENTION_MIPS64_N32)
#define AUDIT_ARCH_OPENRISC (EM_OPENRISC)
#define AUDIT_ARCH_PARISC (EM_PARISC)
#define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
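
An illustrative composition of the new convention bits (not code from any arch port): how a MIPS audit arch value would be assembled from the machine type, bitness, endianness, and the n32 calling-convention marker defined above.

	#include <linux/audit.h>
	#include <linux/types.h>

	static inline u32 example_mips_audit_arch(bool is_64bit, bool is_le, bool is_n32)
	{
		u32 arch = EM_MIPS;

		if (is_64bit)
			arch |= __AUDIT_ARCH_64BIT;
		if (is_le)
			arch |= __AUDIT_ARCH_LE;
		if (is_n32)		/* n32 shares EM_MIPS and the 64-bit flag with n64 */
			arch |= __AUDIT_ARCH_CONVENTION_MIPS64_N32;

		return arch;
	}
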
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index cf4750e1bb49..40b5ca8a1b1f 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -96,6 +96,11 @@
*
* 7.23
* - add FUSE_WRITEBACK_CACHE
+ * - add time_gran to fuse_init_out
+ * - add reserved space to fuse_init_out
+ * - add FATTR_CTIME
+ * - add ctime and ctimensec to fuse_setattr_in
+ * - add FUSE_RENAME2 request
*/
#ifndef _LINUX_FUSE_H
@@ -191,6 +196,7 @@ struct fuse_file_lock {
#define FATTR_ATIME_NOW (1 << 7)
#define FATTR_MTIME_NOW (1 << 8)
#define FATTR_LOCKOWNER (1 << 9)
+#define FATTR_CTIME (1 << 10)
/**
* Flags returned by the OPEN request
@@ -348,6 +354,7 @@ enum fuse_opcode {
FUSE_BATCH_FORGET = 42,
FUSE_FALLOCATE = 43,
FUSE_READDIRPLUS = 44,
+ FUSE_RENAME2 = 45,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -426,6 +433,12 @@ struct fuse_rename_in {
uint64_t newdir;
};
+struct fuse_rename2_in {
+ uint64_t newdir;
+ uint32_t flags;
+ uint32_t padding;
+};
+
struct fuse_link_in {
uint64_t oldnodeid;
};
@@ -438,10 +451,10 @@ struct fuse_setattr_in {
uint64_t lock_owner;
uint64_t atime;
uint64_t mtime;
- uint64_t unused2;
+ uint64_t ctime;
uint32_t atimensec;
uint32_t mtimensec;
- uint32_t unused3;
+ uint32_t ctimensec;
uint32_t mode;
uint32_t unused4;
uint32_t uid;
@@ -559,6 +572,9 @@ struct fuse_init_in {
uint32_t flags;
};
+#define FUSE_COMPAT_INIT_OUT_SIZE 8
+#define FUSE_COMPAT_22_INIT_OUT_SIZE 24
+
struct fuse_init_out {
uint32_t major;
uint32_t minor;
@@ -567,6 +583,8 @@ struct fuse_init_out {
uint16_t max_background;
uint16_t congestion_threshold;
uint32_t max_write;
+ uint32_t time_gran;
+ uint32_t unused[9];
};
#define CUSE_INIT_INFO_MAX 4096
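
A hedged sketch of how a FUSE_RENAME2 request header might be filled in, mirroring the new fuse_rename2_in layout above; the helper is hypothetical, the flag values are assumed to be the renameat2() flags from <linux/fs.h>, and the old and new names are expected to follow this header in the request payload.

	#include <linux/fuse.h>
	#include <stdint.h>

	static void example_fill_rename2(struct fuse_rename2_in *arg,
					 uint64_t newdir, uint32_t flags)
	{
		arg->newdir  = newdir;	/* node id of the destination directory */
		arg->flags   = flags;	/* renameat2() flags, e.g. RENAME_NOREPLACE */
		arg->padding = 0;	/* keep the reserved field zeroed */
	}
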
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 406010d4def0..9922b9b828a2 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -3894,6 +3894,8 @@ enum nl80211_ap_sme_features {
* @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
* to work properly to support receiving regulatory hints from
* cellular base stations.
+ * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
+ * here to reserve the value for API/ABI compatibility)
* @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
* equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
* mode
@@ -3938,7 +3940,7 @@ enum nl80211_feature_flags {
NL80211_FEATURE_HT_IBSS = 1 << 1,
NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
- /* bit 4 is reserved - don't use */
+ NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4,
NL80211_FEATURE_SAE = 1 << 5,
NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6,
NL80211_FEATURE_SCAN_FLUSH = 1 << 7,
diff --git a/init/main.c b/init/main.c
index 9c7fd4c9249f..48655ceb66f4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -476,7 +476,7 @@ static void __init mm_init(void)
vmalloc_init();
}
-asmlinkage void __init start_kernel(void)
+asmlinkage __visible void __init start_kernel(void)
{
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9fcdaa705b6c..3f1ca934a237 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
* reference-counted, to improve performance when child cgroups
* haven't been created.
*/
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
.refcount = ATOMIC_INIT(1),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
@@ -1495,7 +1495,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
*/
if (!use_task_css_set_links)
cgroup_enable_task_cg_lists();
-retry:
+
mutex_lock(&cgroup_tree_mutex);
mutex_lock(&cgroup_mutex);
@@ -1503,7 +1503,7 @@ retry:
ret = parse_cgroupfs_options(data, &opts);
if (ret)
goto out_unlock;
-
+retry:
/* look for a matching existing root */
if (!opts.subsys_mask && !opts.none && !opts.name) {
cgrp_dfl_root_visible = true;
@@ -1562,9 +1562,9 @@ retry:
if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgroup_tree_mutex);
- kfree(opts.release_agent);
- kfree(opts.name);
msleep(10);
+ mutex_lock(&cgroup_tree_mutex);
+ mutex_lock(&cgroup_mutex);
goto retry;
}
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 2bc4a2256444..345628c78b5b 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -21,6 +21,7 @@
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
+#include <linux/mutex.h>
/*
* A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
struct freezer {
struct cgroup_subsys_state css;
unsigned int state;
- spinlock_t lock;
};
+static DEFINE_MUTEX(freezer_mutex);
+
static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
if (!freezer)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&freezer->lock);
return &freezer->css;
}
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
struct freezer *freezer = css_freezer(css);
struct freezer *parent = parent_freezer(freezer);
- /*
- * The following double locking and freezing state inheritance
- * guarantee that @cgroup can never escape ancestors' freezing
- * states. See css_for_each_descendant_pre() for details.
- */
- if (parent)
- spin_lock_irq(&parent->lock);
- spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
+ mutex_lock(&freezer_mutex);
freezer->state |= CGROUP_FREEZER_ONLINE;
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
atomic_inc(&system_freezing_cnt);
}
- spin_unlock(&freezer->lock);
- if (parent)
- spin_unlock_irq(&parent->lock);
-
+ mutex_unlock(&freezer_mutex);
return 0;
}
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
{
struct freezer *freezer = css_freezer(css);
- spin_lock_irq(&freezer->lock);
+ mutex_lock(&freezer_mutex);
if (freezer->state & CGROUP_FREEZING)
atomic_dec(&system_freezing_cnt);
freezer->state = 0;
- spin_unlock_irq(&freezer->lock);
+ mutex_unlock(&freezer_mutex);
}
static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
struct task_struct *task;
bool clear_frozen = false;
- spin_lock_irq(&freezer->lock);
+ mutex_lock(&freezer_mutex);
/*
* Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
}
}
- spin_unlock_irq(&freezer->lock);
-
- /*
- * Propagate FROZEN clearing upwards. We may race with
- * update_if_frozen(), but as long as both work bottom-up, either
- * update_if_frozen() sees child's FROZEN cleared or we clear the
- * parent's FROZEN later. No parent w/ !FROZEN children can be
- * left FROZEN.
- */
+ /* propagate FROZEN clearing upwards */
while (clear_frozen && (freezer = parent_freezer(freezer))) {
- spin_lock_irq(&freezer->lock);
freezer->state &= ~CGROUP_FROZEN;
clear_frozen = freezer->state & CGROUP_FREEZING;
- spin_unlock_irq(&freezer->lock);
}
+
+ mutex_unlock(&freezer_mutex);
}
/**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
{
struct freezer *freezer;
- rcu_read_lock();
- freezer = task_freezer(task);
-
/*
* The root cgroup is non-freezable, so we can skip locking the
* freezer. This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
* to do. If we lost and root is the new cgroup, noop is still the
* right thing to do.
*/
- if (!parent_freezer(freezer))
- goto out;
+ if (task_css_is_root(task, freezer_cgrp_id))
+ return;
- /*
- * Grab @freezer->lock and freeze @task after verifying @task still
- * belongs to @freezer and it's freezing. The former is for the
- * case where we have raced against task migration and lost and
- * @task is already in a different cgroup which may not be frozen.
- * This isn't strictly necessary as freeze_task() is allowed to be
- * called spuriously but let's do it anyway for, if nothing else,
- * documentation.
- */
- spin_lock_irq(&freezer->lock);
- if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
+ mutex_lock(&freezer_mutex);
+ rcu_read_lock();
+
+ freezer = task_freezer(task);
+ if (freezer->state & CGROUP_FREEZING)
freeze_task(task);
- spin_unlock_irq(&freezer->lock);
-out:
+
rcu_read_unlock();
+ mutex_unlock(&freezer_mutex);
}
/**
@@ -281,22 +255,24 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
struct css_task_iter it;
struct task_struct *task;
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- spin_lock_irq(&freezer->lock);
+ lockdep_assert_held(&freezer_mutex);
if (!(freezer->state & CGROUP_FREEZING) ||
(freezer->state & CGROUP_FROZEN))
- goto out_unlock;
+ return;
/* are all (live) children frozen? */
+ rcu_read_lock();
css_for_each_child(pos, css) {
struct freezer *child = css_freezer(pos);
if ((child->state & CGROUP_FREEZER_ONLINE) &&
- !(child->state & CGROUP_FROZEN))
- goto out_unlock;
+ !(child->state & CGROUP_FROZEN)) {
+ rcu_read_unlock();
+ return;
+ }
}
+ rcu_read_unlock();
/* are all tasks frozen? */
css_task_iter_start(css, &it);
@@ -317,21 +293,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
freezer->state |= CGROUP_FROZEN;
out_iter_end:
css_task_iter_end(&it);
-out_unlock:
- spin_unlock_irq(&freezer->lock);
}
static int freezer_read(struct seq_file *m, void *v)
{
struct cgroup_subsys_state *css = seq_css(m), *pos;
+ mutex_lock(&freezer_mutex);
rcu_read_lock();
/* update states bottom-up */
- css_for_each_descendant_post(pos, css)
+ css_for_each_descendant_post(pos, css) {
+ if (!css_tryget(pos))
+ continue;
+ rcu_read_unlock();
+
update_if_frozen(pos);
+ rcu_read_lock();
+ css_put(pos);
+ }
+
rcu_read_unlock();
+ mutex_unlock(&freezer_mutex);
seq_puts(m, freezer_state_strs(css_freezer(css)->state));
seq_putc(m, '\n');
@@ -373,7 +357,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
unsigned int state)
{
/* also synchronizes against task migration, see freezer_attach() */
- lockdep_assert_held(&freezer->lock);
+ lockdep_assert_held(&freezer_mutex);
if (!(freezer->state & CGROUP_FREEZER_ONLINE))
return;
@@ -414,31 +398,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
* descendant will try to inherit its parent's FREEZING state as
* CGROUP_FREEZING_PARENT.
*/
+ mutex_lock(&freezer_mutex);
rcu_read_lock();
css_for_each_descendant_pre(pos, &freezer->css) {
struct freezer *pos_f = css_freezer(pos);
struct freezer *parent = parent_freezer(pos_f);
- spin_lock_irq(&pos_f->lock);
+ if (!css_tryget(pos))
+ continue;
+ rcu_read_unlock();
- if (pos_f == freezer) {
+ if (pos_f == freezer)
freezer_apply_state(pos_f, freeze,
CGROUP_FREEZING_SELF);
- } else {
- /*
- * Our update to @parent->state is already visible
- * which is all we need. No need to lock @parent.
- * For more info on synchronization, see
- * freezer_post_create().
- */
+ else
freezer_apply_state(pos_f,
parent->state & CGROUP_FREEZING,
CGROUP_FREEZING_PARENT);
- }
- spin_unlock_irq(&pos_f->lock);
+ rcu_read_lock();
+ css_put(pos);
}
rcu_read_unlock();
+ mutex_unlock(&freezer_mutex);
}
static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 6cb20d2e7ee0..019d45008448 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -120,7 +120,7 @@ void context_tracking_user_enter(void)
* instead of preempt_schedule() to exit user context if needed before
* calling the scheduler.
*/
-asmlinkage void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_context(void)
{
enum ctx_state prev_ctx;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f83a71a3e46d..440eefc67397 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1443,6 +1443,11 @@ group_sched_out(struct perf_event *group_event,
cpuctx->exclusive = 0;
}
+struct remove_event {
+ struct perf_event *event;
+ bool detach_group;
+};
+
/*
* Cross CPU call to remove a performance event
*
@@ -1451,12 +1456,15 @@ group_sched_out(struct perf_event *group_event,
*/
static int __perf_remove_from_context(void *info)
{
- struct perf_event *event = info;
+ struct remove_event *re = info;
+ struct perf_event *event = re->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
+ if (re->detach_group)
+ perf_group_detach(event);
list_del_event(event, ctx);
if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
ctx->is_active = 0;
@@ -1481,10 +1489,14 @@ static int __perf_remove_from_context(void *info)
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
+ struct remove_event re = {
+ .event = event,
+ .detach_group = detach_group,
+ };
lockdep_assert_held(&ctx->mutex);
@@ -1493,12 +1505,12 @@ static void perf_remove_from_context(struct perf_event *event)
* Per cpu events are removed via an smp call and
* the removal is always successful.
*/
- cpu_function_call(event->cpu, __perf_remove_from_context, event);
+ cpu_function_call(event->cpu, __perf_remove_from_context, &re);
return;
}
retry:
- if (!task_function_call(task, __perf_remove_from_context, event))
+ if (!task_function_call(task, __perf_remove_from_context, &re))
return;
raw_spin_lock_irq(&ctx->lock);
@@ -1515,6 +1527,8 @@ retry:
* Since the task isn't running, it's safe to remove the event; us
* holding the ctx->lock ensures the task won't get scheduled in.
*/
+ if (detach_group)
+ perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
@@ -3178,7 +3192,8 @@ static void free_event_rcu(struct rcu_head *head)
}
static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb);
static void unaccount_event_cpu(struct perf_event *event, int cpu)
{
@@ -3238,8 +3253,6 @@ static void free_event(struct perf_event *event)
unaccount_event(event);
if (event->rb) {
- struct ring_buffer *rb;
-
/*
* Can happen when we close an event with re-directed output.
*
@@ -3247,12 +3260,7 @@ static void free_event(struct perf_event *event)
* over us; possibly making our ring_buffer_put() the last.
*/
mutex_lock(&event->mmap_mutex);
- rb = event->rb;
- if (rb) {
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
- ring_buffer_put(rb); /* could be last */
- }
+ ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
}
@@ -3281,10 +3289,7 @@ int perf_event_release_kernel(struct perf_event *event)
* to trigger the AB-BA case.
*/
mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
- raw_spin_lock_irq(&ctx->lock);
- perf_group_detach(event);
- raw_spin_unlock_irq(&ctx->lock);
- perf_remove_from_context(event);
+ perf_remove_from_context(event, true);
mutex_unlock(&ctx->mutex);
free_event(event);
@@ -3839,28 +3844,47 @@ unlock:
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
{
+ struct ring_buffer *old_rb = NULL;
unsigned long flags;
- if (!list_empty(&event->rb_entry))
- return;
+ if (event->rb) {
+ /*
+ * Should be impossible, we set this when removing
+ * event->rb_entry and wait/clear when adding event->rb_entry.
+ */
+ WARN_ON_ONCE(event->rcu_pending);
- spin_lock_irqsave(&rb->event_lock, flags);
- if (list_empty(&event->rb_entry))
- list_add(&event->rb_entry, &rb->event_list);
- spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+ old_rb = event->rb;
+ event->rcu_batches = get_state_synchronize_rcu();
+ event->rcu_pending = 1;
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
- unsigned long flags;
+ spin_lock_irqsave(&old_rb->event_lock, flags);
+ list_del_rcu(&event->rb_entry);
+ spin_unlock_irqrestore(&old_rb->event_lock, flags);
+ }
- if (list_empty(&event->rb_entry))
- return;
+ if (event->rcu_pending && rb) {
+ cond_synchronize_rcu(event->rcu_batches);
+ event->rcu_pending = 0;
+ }
+
+ if (rb) {
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_add_rcu(&event->rb_entry, &rb->event_list);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+ }
+
+ rcu_assign_pointer(event->rb, rb);
- spin_lock_irqsave(&rb->event_lock, flags);
- list_del_init(&event->rb_entry);
- wake_up_all(&event->waitq);
- spin_unlock_irqrestore(&rb->event_lock, flags);
+ if (old_rb) {
+ ring_buffer_put(old_rb);
+ /*
+	 * Since we detached before setting the new rb (so that we
+	 * could attach the new rb), we could have missed a wakeup.
+ * Provide it now.
+ */
+ wake_up_all(&event->waitq);
+ }
}
static void ring_buffer_wakeup(struct perf_event *event)
@@ -3929,7 +3953,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
- struct ring_buffer *rb = event->rb;
+ struct ring_buffer *rb = ring_buffer_get(event);
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
@@ -3937,18 +3961,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
atomic_dec(&rb->mmap_count);
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
- return;
+ goto out_put;
- /* Detach current event from the buffer. */
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
+ ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex);
/* If there's still other mmap()s of this buffer, we're done. */
- if (atomic_read(&rb->mmap_count)) {
- ring_buffer_put(rb); /* can't be last */
- return;
- }
+ if (atomic_read(&rb->mmap_count))
+ goto out_put;
/*
* No other mmap()s, detach from all other events that might redirect
@@ -3978,11 +3998,9 @@ again:
* still restart the iteration to make sure we're not now
* iterating the wrong list.
*/
- if (event->rb == rb) {
- rcu_assign_pointer(event->rb, NULL);
- ring_buffer_detach(event, rb);
- ring_buffer_put(rb); /* can't be last, we still have one */
- }
+ if (event->rb == rb)
+ ring_buffer_attach(event, NULL);
+
mutex_unlock(&event->mmap_mutex);
put_event(event);
@@ -4007,6 +4025,7 @@ again:
vma->vm_mm->pinned_vm -= mmap_locked;
free_uid(mmap_user);
+out_put:
ring_buffer_put(rb); /* could be last */
}
@@ -4124,7 +4143,6 @@ again:
vma->vm_mm->pinned_vm += extra;
ring_buffer_attach(event, rb);
- rcu_assign_pointer(event->rb, rb);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
@@ -5408,6 +5426,9 @@ struct swevent_htable {
/* Recursion avoidance in each contexts */
int recursion[PERF_NR_CONTEXTS];
+
+ /* Keeps track of cpu being initialized/exited */
+ bool online;
};
static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5654,8 +5675,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
hwc->state = !(flags & PERF_EF_START);
head = find_swevent_head(swhash, event);
- if (WARN_ON_ONCE(!head))
+ if (!head) {
+ /*
+ * We can race with cpu hotplug code. Do not
+ * WARN if the cpu just got unplugged.
+ */
+ WARN_ON_ONCE(swhash->online);
return -EINVAL;
+ }
hlist_add_head_rcu(&event->hlist_entry, head);
@@ -6914,7 +6941,7 @@ err_size:
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
- struct ring_buffer *rb = NULL, *old_rb = NULL;
+ struct ring_buffer *rb = NULL;
int ret = -EINVAL;
if (!output_event)
@@ -6942,8 +6969,6 @@ set:
if (atomic_read(&event->mmap_count))
goto unlock;
- old_rb = event->rb;
-
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
@@ -6951,23 +6976,7 @@ set:
goto unlock;
}
- if (old_rb)
- ring_buffer_detach(event, old_rb);
-
- if (rb)
- ring_buffer_attach(event, rb);
-
- rcu_assign_pointer(event->rb, rb);
-
- if (old_rb) {
- ring_buffer_put(old_rb);
- /*
- * Since we detached before setting the new rb, so that we
- * could attach the new rb, we could have missed a wakeup.
- * Provide it now.
- */
- wake_up_all(&event->waitq);
- }
+ ring_buffer_attach(event, rb);
ret = 0;
unlock:
@@ -7018,6 +7027,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (attr.freq) {
if (attr.sample_freq > sysctl_perf_event_sample_rate)
return -EINVAL;
+ } else {
+ if (attr.sample_period & (1ULL << 63))
+ return -EINVAL;
}
/*
@@ -7165,7 +7177,7 @@ SYSCALL_DEFINE5(perf_event_open,
struct perf_event_context *gctx = group_leader->ctx;
mutex_lock(&gctx->mutex);
- perf_remove_from_context(group_leader);
+ perf_remove_from_context(group_leader, false);
/*
* Removing from the context ends up with disabled
@@ -7175,7 +7187,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_event__state_init(group_leader);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
- perf_remove_from_context(sibling);
+ perf_remove_from_context(sibling, false);
perf_event__state_init(sibling);
put_ctx(gctx);
}
@@ -7305,7 +7317,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
mutex_lock(&src_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
- perf_remove_from_context(event);
+ perf_remove_from_context(event, false);
unaccount_event_cpu(event, src_cpu);
put_ctx(src_ctx);
list_add(&event->migrate_entry, &events);
@@ -7367,13 +7379,7 @@ __perf_event_exit_task(struct perf_event *child_event,
struct perf_event_context *child_ctx,
struct task_struct *child)
{
- if (child_event->parent) {
- raw_spin_lock_irq(&child_ctx->lock);
- perf_group_detach(child_event);
- raw_spin_unlock_irq(&child_ctx->lock);
- }
-
- perf_remove_from_context(child_event);
+ perf_remove_from_context(child_event, !!child_event->parent);
/*
* It can happen that the parent exits first, and has events
@@ -7724,6 +7730,8 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
* swapped under us.
*/
parent_ctx = perf_pin_task_context(parent, ctxn);
+ if (!parent_ctx)
+ return 0;
/*
* No need to check if parent_ctx != NULL here; since we saw
@@ -7835,6 +7843,7 @@ static void perf_event_init_cpu(int cpu)
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
+ swhash->online = true;
if (swhash->hlist_refcount > 0) {
struct swevent_hlist *hlist;
@@ -7857,14 +7866,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
static void __perf_event_exit_context(void *__info)
{
+ struct remove_event re = { .detach_group = false };
struct perf_event_context *ctx = __info;
- struct perf_event *event;
perf_pmu_rotate_stop(ctx->pmu);
rcu_read_lock();
- list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
- __perf_remove_from_context(event);
+ list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+ __perf_remove_from_context(&re);
rcu_read_unlock();
}
@@ -7892,6 +7901,7 @@ static void perf_event_exit_cpu(int cpu)
perf_event_exit_cpu_context(cpu);
mutex_lock(&swhash->hlist_mutex);
+ swhash->online = false;
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
}
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 6b715c0af1b1..e0501fe7140d 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -990,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
/* Remove an active timer from the queue: */
ret = remove_hrtimer(timer, base);
- /* Switch the timer base, if necessary: */
- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
if (mode & HRTIMER_MODE_REL) {
- tim = ktime_add_safe(tim, new_base->get_time());
+ tim = ktime_add_safe(tim, base->get_time());
/*
* CONFIG_TIME_LOW_RES is a temporary way for architectures
* to signal that they simply return xtime in
@@ -1009,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+ /* Switch the timer base, if necessary: */
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
timer_stats_hrtimer_set_start_info(timer);
leftmost = enqueue_hrtimer(timer, new_base);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b0e9467922e1..d24e4339b46d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
}
EXPORT_SYMBOL_GPL(debug_show_held_locks);
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
{
struct task_struct *curr = current;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 18fb7a2fb14b..1ea328aafdc9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1586,7 +1586,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
return -ENOMEM;
}
-asmlinkage int swsusp_save(void)
+asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a45b50962295..7228258b85ec 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1674,7 +1674,7 @@ EXPORT_SYMBOL(printk_emit);
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
-asmlinkage int printk(const char *fmt, ...)
+asmlinkage __visible int printk(const char *fmt, ...)
{
va_list args;
int r;
@@ -1737,7 +1737,7 @@ void early_vprintk(const char *fmt, va_list ap)
}
}
-asmlinkage void early_printk(const char *fmt, ...)
+asmlinkage __visible void early_printk(const char *fmt, ...)
{
va_list ap;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..204d3d281809 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
*/
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
@@ -2592,8 +2592,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
if (likely(prev->sched_class == class &&
rq->nr_running == rq->cfs.h_nr_running)) {
p = fair_sched_class.pick_next_task(rq, prev);
- if (likely(p && p != RETRY_TASK))
- return p;
+ if (unlikely(p == RETRY_TASK))
+ goto again;
+
+ /* assumes fair_sched_class->next == idle_sched_class */
+ if (unlikely(!p))
+ p = idle_sched_class.pick_next_task(rq, prev);
+
+ return p;
}
again:
@@ -2741,7 +2747,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
blk_schedule_flush_plug(tsk);
}
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
@@ -2751,7 +2757,7 @@ asmlinkage void __sched schedule(void)
EXPORT_SYMBOL(schedule);
#ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
{
/*
* If we come here after a random call to set_need_resched(),
@@ -2783,7 +2789,7 @@ void __sched schedule_preempt_disabled(void)
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
{
/*
* If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2819,7 @@ EXPORT_SYMBOL(preempt_schedule);
* Note, that this is called and return with irqs disabled. This will
* protect us against recursive calling from irq.
*/
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
{
enum ctx_state prev_state;
@@ -3124,6 +3130,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se->dl_throttled = 0;
dl_se->dl_new = 1;
+ dl_se->dl_yielded = 0;
}
static void __setscheduler_params(struct task_struct *p,
@@ -3639,6 +3646,7 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
* sys_sched_setattr - same as above, but with extended sched_attr
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
*/
SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, flags)
@@ -3783,6 +3791,7 @@ err_size:
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
* @size: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
*/
SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
unsigned int, size, unsigned int, flags)
@@ -6017,6 +6026,8 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
,
.last_balance = jiffies,
.balance_interval = sd_weight,
+ .max_newidle_lb_cost = 0,
+ .next_decay_max_lb_cost = jiffies,
};
SD_INIT_NAME(sd, NUMA);
sd->private = &tl->data;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5b9bb42b2d47..ab001b5d5048 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -210,7 +210,5 @@ int cpudl_init(struct cpudl *cp)
*/
void cpudl_cleanup(struct cpudl *cp)
{
- /*
- * nothing to do for the moment
- */
+ free_cpumask_var(cp->free_cpus);
}
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8b836b376d91..3031bac8aa3e 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -70,8 +70,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
int idx = 0;
int task_pri = convert_prio(p->prio);
- if (task_pri >= MAX_RT_PRIO)
- return 0;
+ BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
for (idx = 0; idx < task_pri; idx++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a95097cb4591..72fdf06ef865 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -332,50 +332,50 @@ out:
* softirq as those do not count in task exec_runtime any more.
*/
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
- struct rq *rq)
+ struct rq *rq, int ticks)
{
- cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+ cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
+ u64 cputime = (__force u64) cputime_one_jiffy;
u64 *cpustat = kcpustat_this_cpu->cpustat;
if (steal_account_process_tick())
return;
+ cputime *= ticks;
+ scaled *= ticks;
+
if (irqtime_account_hi_update()) {
- cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy;
+ cpustat[CPUTIME_IRQ] += cputime;
} else if (irqtime_account_si_update()) {
- cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy;
+ cpustat[CPUTIME_SOFTIRQ] += cputime;
} else if (this_cpu_ksoftirqd() == p) {
/*
* ksoftirqd time do not get accounted in cpu_softirq_time.
* So, we have to handle it separately here.
* Also, p->stime needs to be updated for ksoftirqd.
*/
- __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
- CPUTIME_SOFTIRQ);
+ __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
} else if (user_tick) {
- account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+ account_user_time(p, cputime, scaled);
} else if (p == rq->idle) {
- account_idle_time(cputime_one_jiffy);
+ account_idle_time(cputime);
} else if (p->flags & PF_VCPU) { /* System time or guest time */
- account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+ account_guest_time(p, cputime, scaled);
} else {
- __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
- CPUTIME_SYSTEM);
+ __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
}
}
static void irqtime_account_idle_ticks(int ticks)
{
- int i;
struct rq *rq = this_rq();
- for (i = 0; i < ticks; i++)
- irqtime_account_process_tick(current, 0, rq);
+ irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
- struct rq *rq) {}
+ struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
@@ -464,7 +464,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
return;
if (sched_clock_irqtime) {
- irqtime_account_process_tick(p, user_tick, rq);
+ irqtime_account_process_tick(p, user_tick, rq, 1);
return;
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b08095786cb8..800e99b99075 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
sched_clock_tick();
update_rq_clock(rq);
dl_se->dl_throttled = 0;
+ dl_se->dl_yielded = 0;
if (p->on_rq) {
enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
* We make the task go to sleep until its current deadline by
* forcing its runtime to zero. This way, update_curr_dl() stops
* it and the bandwidth timer will wake it up and will give it
- * new scheduling parameters (thanks to dl_new=1).
+ * new scheduling parameters (thanks to dl_yielded=1).
*/
if (p->dl.runtime > 0) {
- rq->curr->dl.dl_new = 1;
+ rq->curr->dl.dl_yielded = 1;
p->dl.runtime = 0;
}
update_curr_dl(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7570dd969c28..0fdb96de81a5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6653,6 +6653,7 @@ static int idle_balance(struct rq *this_rq)
int this_cpu = this_rq->cpu;
idle_enter_fair(this_rq);
+
/*
* We must set idle_stamp _before_ calling idle_balance(), such that we
* measure the duration of idle_balance() as idle time.
@@ -6705,14 +6706,16 @@ static int idle_balance(struct rq *this_rq)
raw_spin_lock(&this_rq->lock);
+ if (curr_cost > this_rq->max_idle_balance_cost)
+ this_rq->max_idle_balance_cost = curr_cost;
+
/*
- * While browsing the domains, we released the rq lock.
- * A task could have be enqueued in the meantime
+	 * While browsing the domains, we released the rq lock; a task could
+ * have been enqueued in the meantime. Since we're not going idle,
+ * pretend we pulled a task.
*/
- if (this_rq->cfs.h_nr_running && !pulled_task) {
+ if (this_rq->cfs.h_nr_running && !pulled_task)
pulled_task = 1;
- goto out;
- }
if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
/*
@@ -6722,9 +6725,6 @@ static int idle_balance(struct rq *this_rq)
this_rq->next_balance = next_balance;
}
- if (curr_cost > this_rq->max_idle_balance_cost)
- this_rq->max_idle_balance_cost = curr_cost;
-
out:
/* Is there a task of a high priority class? */
if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 33e4648ae0e7..92f24f5e8d52 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -223,7 +223,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
-asmlinkage void __do_softirq(void)
+asmlinkage __visible void __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
@@ -299,7 +299,7 @@ restart:
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
{
__u32 pending;
unsigned long flags;
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index ac5b23cf7212..6620e5837ce2 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -188,7 +188,6 @@ static int tracepoint_add_func(struct tracepoint *tp,
WARN_ON_ONCE(1);
return PTR_ERR(old);
}
- release_probes(old);
/*
* rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -200,6 +199,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
rcu_assign_pointer(tp->funcs, tp_funcs);
if (!static_key_enabled(&tp->key))
static_key_slow_inc(&tp->key);
+ release_probes(old);
return 0;
}
@@ -221,7 +221,6 @@ static int tracepoint_remove_func(struct tracepoint *tp,
WARN_ON_ONCE(1);
return PTR_ERR(old);
}
- release_probes(old);
if (!tp_funcs) {
/* Removed last function */
@@ -232,6 +231,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
static_key_slow_dec(&tp->key);
}
rcu_assign_pointer(tp->funcs, tp_funcs);
+ release_probes(old);
return 0;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0ee63af30bd1..8edc87185427 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1916,6 +1916,12 @@ static void send_mayday(struct work_struct *work)
/* mayday mayday mayday */
if (list_empty(&pwq->mayday_node)) {
+ /*
+ * If @pwq is for an unbound wq, its base ref may be put at
+ * any time due to an attribute change. Pin @pwq until the
+ * rescuer is done with it.
+ */
+ get_pwq(pwq);
list_add_tail(&pwq->mayday_node, &wq->maydays);
wake_up_process(wq->rescuer->task);
}
@@ -2398,6 +2404,7 @@ static int rescuer_thread(void *__rescuer)
struct worker *rescuer = __rescuer;
struct workqueue_struct *wq = rescuer->rescue_wq;
struct list_head *scheduled = &rescuer->scheduled;
+ bool should_stop;
set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -2409,11 +2416,15 @@ static int rescuer_thread(void *__rescuer)
repeat:
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
- rescuer->task->flags &= ~PF_WQ_WORKER;
- return 0;
- }
+ /*
+ * By the time the rescuer is requested to stop, the workqueue
+ * shouldn't have any work pending, but @wq->maydays may still have
+	 * pwq(s) queued. This can happen when non-rescuer workers consume
+	 * all the work items before the rescuer gets to them. Go through
+ * @wq->maydays processing before acting on should_stop so that the
+ * list is always empty on exit.
+ */
+ should_stop = kthread_should_stop();
/* see whether any pwq is asking for help */
spin_lock_irq(&wq_mayday_lock);
@@ -2445,6 +2456,12 @@ repeat:
process_scheduled_works(rescuer);
/*
+ * Put the reference grabbed by send_mayday(). @pool won't
+ * go away while we're holding its lock.
+ */
+ put_pwq(pwq);
+
+ /*
* Leave this pool. If keep_working() is %true, notify a
* regular worker; otherwise, we end up with 0 concurrency
* and stalling the execution.
@@ -2459,6 +2476,12 @@ repeat:
spin_unlock_irq(&wq_mayday_lock);
+ if (should_stop) {
+ __set_current_state(TASK_RUNNING);
+ rescuer->task->flags &= ~PF_WQ_WORKER;
+ return 0;
+ }
+
/* rescuers should never participate in concurrency management */
WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
schedule();
@@ -4100,7 +4123,8 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
if (!pwq) {
pr_warning("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
wq->name);
- goto out_unlock;
+ mutex_lock(&wq->mutex);
+ goto use_dfl_pwq;
}
/*
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index f23b63f0a1c3..6745c6230db3 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -23,7 +23,7 @@ static void __dump_stack(void)
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
{
int was_locked;
int old;
@@ -55,7 +55,7 @@ retry:
preempt_enable();
}
#else
-asmlinkage void dump_stack(void)
+asmlinkage __visible void dump_stack(void)
{
__dump_stack();
}
diff --git a/mm/Kconfig b/mm/Kconfig
index ebe5880c29d6..1b5a95f0fa01 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -581,3 +581,18 @@ config PGTABLE_MAPPING
config GENERIC_EARLY_IOREMAP
bool
+
+config MAX_STACK_SIZE_MB
+ int "Maximum user stack size for 32-bit processes (MB)"
+ default 80
+ range 8 256 if METAG
+ range 8 2048
+ depends on STACK_GROWSUP && (!64BIT || COMPAT)
+ help
+ This is the maximum stack size in Megabytes in the VM layout of 32-bit
+ user processes when the stack grows upwards (currently only on parisc
+ and metag arch). The stack will be located at the highest memory
+ address minus the given value, unless the RLIMIT_STACK hard limit is
+ changed to a smaller value in which case that is used.
+
+ A sane initial value is 80 MB.
diff --git a/mm/compaction.c b/mm/compaction.c
index 37f976287068..627dc2e4320f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -671,16 +671,20 @@ static void isolate_freepages(struct zone *zone,
struct compact_control *cc)
{
struct page *page;
- unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+ unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages;
/*
* Initialise the free scanner. The starting point is where we last
- * scanned from (or the end of the zone if starting). The low point
- * is the end of the pageblock the migration scanner is using.
+ * successfully isolated from, zone-cached value, or the end of the
+ * zone when isolating for the first time. We need this aligned to
+ * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+ * in the for loop.
+ * The low boundary is the end of the pageblock the migration scanner
+ * is using.
*/
- pfn = cc->free_pfn;
+ pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
/*
@@ -700,6 +704,7 @@ static void isolate_freepages(struct zone *zone,
for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
pfn -= pageblock_nr_pages) {
unsigned long isolated;
+ unsigned long end_pfn;
/*
* This can iterate a massively long zone without finding any
@@ -734,13 +739,10 @@ static void isolate_freepages(struct zone *zone,
isolated = 0;
/*
- * As pfn may not start aligned, pfn+pageblock_nr_page
- * may cross a MAX_ORDER_NR_PAGES boundary and miss
- * a pfn_valid check. Ensure isolate_freepages_block()
- * only scans within a pageblock
+ * Take care when isolating in last pageblock of a zone which
+ * ends in the middle of a pageblock.
*/
- end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
- end_pfn = min(end_pfn, z_end_pfn);
+ end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
isolated = isolate_freepages_block(cc, pfn, end_pfn,
freelist, false);
nr_freepages += isolated;
diff --git a/mm/filemap.c b/mm/filemap.c
index 5020b280a771..088358c8006b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -257,9 +257,11 @@ static int filemap_check_errors(struct address_space *mapping)
{
int ret = 0;
/* Check for outstanding write errors */
- if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+ if (test_bit(AS_ENOSPC, &mapping->flags) &&
+ test_and_clear_bit(AS_ENOSPC, &mapping->flags))
ret = -ENOSPC;
- if (test_and_clear_bit(AS_EIO, &mapping->flags))
+ if (test_bit(AS_EIO, &mapping->flags) &&
+ test_and_clear_bit(AS_EIO, &mapping->flags))
ret = -EIO;
return ret;
}
@@ -906,8 +908,8 @@ EXPORT_SYMBOL(page_cache_prev_hole);
* Looks up the page cache slot at @mapping & @offset. If there is a
* page cache page, it is returned with an increased refcount.
*
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
*
* Otherwise, %NULL is returned.
*/
@@ -928,9 +930,9 @@ repeat:
if (radix_tree_deref_retry(page))
goto repeat;
/*
- * Otherwise, shmem/tmpfs must be storing a swap entry
- * here as an exceptional entry: so return it without
- * attempting to raise page count.
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Return
+ * it without attempting to raise page count.
*/
goto out;
}
@@ -983,8 +985,8 @@ EXPORT_SYMBOL(find_get_page);
* page cache page, it is returned locked and with an increased
* refcount.
*
- * If the slot holds a shadow entry of a previously evicted page, it
- * is returned.
+ * If the slot holds a shadow entry of a previously evicted page, or a
+ * swap entry from shmem/tmpfs, it is returned.
*
* Otherwise, %NULL is returned.
*
@@ -1099,8 +1101,8 @@ EXPORT_SYMBOL(find_or_create_page);
* with ascending indexes. There may be holes in the indices due to
* not-present pages.
*
- * Any shadow entries of evicted pages are included in the returned
- * array.
+ * Any shadow entries of evicted pages, or swap entries from
+ * shmem/tmpfs, are included in the returned array.
*
* find_get_entries() returns the number of pages and shadow entries
* which were found.
@@ -1128,9 +1130,9 @@ repeat:
if (radix_tree_deref_retry(page))
goto restart;
/*
- * Otherwise, we must be storing a swap entry
- * here as an exceptional entry: so return it
- * without attempting to raise page count.
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Return
+ * it without attempting to raise page count.
*/
goto export;
}
@@ -1198,9 +1200,9 @@ repeat:
goto restart;
}
/*
- * Otherwise, shmem/tmpfs must be storing a swap entry
- * here as an exceptional entry: so skip over it -
- * we only reach this from invalidate_mapping_pages().
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Skip
+ * over it.
*/
continue;
}
@@ -1265,9 +1267,9 @@ repeat:
goto restart;
}
/*
- * Otherwise, shmem/tmpfs must be storing a swap entry
- * here as an exceptional entry: so stop looking for
- * contiguous pages.
+ * A shadow entry of a recently evicted page,
+ * or a swap entry from shmem/tmpfs. Stop
+ * looking for contiguous pages.
*/
break;
}
@@ -1341,10 +1343,17 @@ repeat:
goto restart;
}
/*
- * This function is never used on a shmem/tmpfs
- * mapping, so a swap entry won't be found here.
+ * A shadow entry of a recently evicted page.
+ *
+ * Those entries should never be tagged, but
+ * this tree walk is lockless and the tags are
+ * looked up in bulk, one radix tree node at a
+ * time, so there is a sizable window for page
+ * reclaim to evict a page we saw tagged.
+ *
+ * Skip over it.
*/
- BUG();
+ continue;
}
if (!page_cache_get_speculative(page))
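
The updated comments above describe the calling convention for the entry-returning lookups: the result may be a real page or an exceptional entry (a shadow entry or a shmem/tmpfs swap entry). A hedged caller sketch, modelled on, but not copied from, in-tree users such as the madvise change below:

	#include <linux/pagemap.h>
	#include <linux/radix-tree.h>

	static void example_lookup(struct address_space *mapping, pgoff_t index)
	{
		struct page *page = find_get_entry(mapping, index);

		/* Exceptional entries are shadow or shmem swap entries, not pages. */
		if (radix_tree_exceptional_entry(page))
			return;
		if (page)
			page_cache_release(page);	/* drop the reference we took */
	}
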
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 246192929a2d..c82290b9c1fc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1981,11 +1981,7 @@ static int __init hugetlb_init(void)
{
int i;
- /* Some platform decide whether they support huge pages at boot
- * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
- * there is no such support
- */
- if (HPAGE_SHIFT == 0)
+ if (!hugepages_supported())
return 0;
if (!size_to_hstate(default_hstate_size)) {
@@ -2112,6 +2108,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
unsigned long tmp;
int ret;
+ if (!hugepages_supported())
+ return -ENOTSUPP;
+
tmp = h->max_huge_pages;
if (write && h->order >= MAX_ORDER)
@@ -2165,6 +2164,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
unsigned long tmp;
int ret;
+ if (!hugepages_supported())
+ return -ENOTSUPP;
+
tmp = h->nr_overcommit_huge_pages;
if (write && h->order >= MAX_ORDER)
@@ -2190,6 +2192,8 @@ out:
void hugetlb_report_meminfo(struct seq_file *m)
{
struct hstate *h = &default_hstate;
+ if (!hugepages_supported())
+ return;
seq_printf(m,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
@@ -2206,6 +2210,8 @@ void hugetlb_report_meminfo(struct seq_file *m)
int hugetlb_report_node_meminfo(int nid, char *buf)
{
struct hstate *h = &default_hstate;
+ if (!hugepages_supported())
+ return 0;
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
"Node %d HugePages_Free: %5u\n"
@@ -2220,6 +2226,9 @@ void hugetlb_show_meminfo(void)
struct hstate *h;
int nid;
+ if (!hugepages_supported())
+ return;
+
for_each_node_state(nid, N_MEMORY)
for_each_hstate(h)
pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 91d67eaee050..8d2fcdfeff7f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1775,10 +1775,9 @@ void __init kmemleak_init(void)
int i;
unsigned long flags;
- kmemleak_early_log = 0;
-
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
if (!kmemleak_skip_disable) {
+ kmemleak_early_log = 0;
kmemleak_disable();
return;
}
@@ -1796,6 +1795,7 @@ void __init kmemleak_init(void)
/* the kernel is still in UP mode, so disabling the IRQs is enough */
local_irq_save(flags);
+ kmemleak_early_log = 0;
if (kmemleak_error) {
local_irq_restore(flags);
return;
diff --git a/mm/madvise.c b/mm/madvise.c
index 539eeb96b323..a402f8fdc68e 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -195,7 +195,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
for (; start < end; start += PAGE_SIZE) {
index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- page = find_get_page(mapping, index);
+ page = find_get_entry(mapping, index);
if (!radix_tree_exceptional_entry(page)) {
if (page)
page_cache_release(page);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29501f040568..5177c6d4a2dd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1077,9 +1077,18 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
rcu_read_lock();
do {
- memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
+ /*
+ * Page cache insertions can happen without an
+ * actual mm context, e.g. during disk probing
+ * on boot, loopback IO, acct() writes etc.
+ */
+ if (unlikely(!mm))
memcg = root_mem_cgroup;
+ else {
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!memcg))
+ memcg = root_mem_cgroup;
+ }
} while (!css_tryget(&memcg->css));
rcu_read_unlock();
return memcg;
@@ -3958,17 +3967,9 @@ int mem_cgroup_charge_file(struct page *page, struct mm_struct *mm,
return 0;
}
- /*
- * Page cache insertions can happen without an actual mm
- * context, e.g. during disk probing on boot.
- */
- if (unlikely(!mm))
- memcg = root_mem_cgroup;
- else {
- memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
- if (!memcg)
- return -ENOMEM;
- }
+ memcg = mem_cgroup_try_charge_mm(mm, gfp_mask, 1, true);
+ if (!memcg)
+ return -ENOMEM;
__mem_cgroup_commit_charge(memcg, page, 1, type, false);
return 0;
}
@@ -6686,16 +6687,20 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
pgoff = pte_to_pgoff(ptent);
/* page is moved even if it's not RSS of this task(page-faulted). */
- page = find_get_page(mapping, pgoff);
-
#ifdef CONFIG_SWAP
/* shmem/tmpfs may report page out on swap: account for that too. */
- if (radix_tree_exceptional_entry(page)) {
- swp_entry_t swap = radix_to_swp_entry(page);
- if (do_swap_account)
- *entry = swap;
- page = find_get_page(swap_address_space(swap), swap.val);
- }
+ if (shmem_mapping(mapping)) {
+ page = find_get_entry(mapping, pgoff);
+ if (radix_tree_exceptional_entry(page)) {
+ swp_entry_t swp = radix_to_swp_entry(page);
+ if (do_swap_account)
+ *entry = swp;
+ page = find_get_page(swap_address_space(swp), swp.val);
+ }
+ } else
+ page = find_get_page(mapping, pgoff);
+#else
+ page = find_get_page(mapping, pgoff);
#endif
return page;
}
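
The memcontrol change above moves the "no mm context" handling into get_mem_cgroup_from_mm() itself, so every charge path falls back to the root group when there is no owning task. A hedged userspace sketch of that lookup-with-fallback shape; group_of() and root_group are invented names, not kernel symbols.

    #include <stdio.h>

    struct group { const char *name; };

    static struct group root_group = { "root" };
    static struct group app_group  = { "app" };

    struct task { struct group *grp; };

    /* Resolve the accounting group for a possibly missing context.
     * Falls back to the root group when there is no task, mirroring how
     * page-cache insertions without an mm end up charged to
     * root_mem_cgroup in the hunk above. */
    static struct group *group_of(const struct task *t)
    {
        if (!t || !t->grp)
            return &root_group;
        return t->grp;
    }

    int main(void)
    {
        struct task worker = { &app_group };

        printf("worker charged to: %s\n", group_of(&worker)->name);
        printf("kernel-internal IO charged to: %s\n", group_of(NULL)->name);
        return 0;
    }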
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 35ef28acf137..9ccef39a9de2 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1081,15 +1081,16 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
return 0;
} else if (PageHuge(hpage)) {
/*
- * Check "just unpoisoned", "filter hit", and
- * "race with other subpage."
+ * Check "filter hit" and "race with other subpage."
*/
lock_page(hpage);
- if (!PageHWPoison(hpage)
- || (hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != hpage && TestSetPageHWPoison(hpage))) {
- atomic_long_sub(nr_pages, &num_poisoned_pages);
- return 0;
+ if (PageHWPoison(hpage)) {
+ if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
+ || (p != hpage && TestSetPageHWPoison(hpage))) {
+ atomic_long_sub(nr_pages, &num_poisoned_pages);
+ unlock_page(hpage);
+ return 0;
+ }
}
set_page_hwpoison_huge_page(hpage);
res = dequeue_hwpoisoned_huge_page(hpage);
@@ -1152,6 +1153,8 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
*/
if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
+ atomic_long_sub(nr_pages, &num_poisoned_pages);
+ put_page(hpage);
res = 0;
goto out;
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 0843feb66f3d..05f1180e9f21 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -194,10 +194,17 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
break;
if (pmd_trans_huge(*old_pmd)) {
int err = 0;
- if (extent == HPAGE_PMD_SIZE)
+ if (extent == HPAGE_PMD_SIZE) {
+ VM_BUG_ON(vma->vm_file || !vma->anon_vma);
+ /* See comment in move_ptes() */
+ if (need_rmap_locks)
+ anon_vma_lock_write(vma->anon_vma);
err = move_huge_pmd(vma, new_vma, old_addr,
new_addr, old_end,
old_pmd, new_pmd);
+ if (need_rmap_locks)
+ anon_vma_unlock_write(vma->anon_vma);
+ }
if (err > 0) {
need_flush = true;
continue;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ef413492a149..a4317da60532 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -593,14 +593,14 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
* (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
* => fast response on large errors; small oscillation near setpoint
*/
-static inline long long pos_ratio_polynom(unsigned long setpoint,
+static long long pos_ratio_polynom(unsigned long setpoint,
unsigned long dirty,
unsigned long limit)
{
long long pos_ratio;
long x;
- x = div_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
+ x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
limit - setpoint + 1);
pos_ratio = x;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -842,7 +842,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
x_intercept = bdi_setpoint + span;
if (bdi_dirty < x_intercept - span / 4) {
- pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+ pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
x_intercept - bdi_setpoint + 1);
} else
pos_ratio /= 4;
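
The page-writeback hunks swap div_s64()/div_u64() for their div64 counterparts because the divisors (limit - setpoint + 1, x_intercept - bdi_setpoint + 1) can exceed 32 bits on machines with very large dirty limits, and the 32-bit-divisor helpers silently truncate. A small userspace demonstration of why the divisor width matters; div_s64_32 below is a stand-in for a helper with a 32-bit divisor, not the kernel function itself.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Stand-in for a division helper whose divisor is only 32 bits wide. */
    static int64_t div_s64_32(int64_t dividend, int32_t divisor)
    {
        return dividend / divisor;
    }

    /* Stand-in for a full 64-bit-divisor helper like div64_s64(). */
    static int64_t div_s64_64(int64_t dividend, int64_t divisor)
    {
        return dividend / divisor;
    }

    int main(void)
    {
        int64_t dividend = 1000000000000LL;   /* some scaled distance */
        int64_t divisor  = 0x100000001LL;     /* > 32 bits, e.g. a huge limit - setpoint */
        /* Keep only the low 32 bits, as a 32-bit divisor parameter would. */
        int32_t truncated = (int32_t)(divisor & 0xffffffff);

        printf("32-bit divisor (truncated to %d): %" PRId64 "\n",
               truncated, div_s64_32(dividend, truncated));
        printf("64-bit divisor: %" PRId64 "\n",
               div_s64_64(dividend, divisor));
        return 0;
    }

With the divisor truncated to 1, the "ratio" explodes instead of staying a small fraction, which is exactly the kind of misbehaviour the switch to div64_s64()/div64_u64() avoids.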
diff --git a/mm/percpu.c b/mm/percpu.c
index 63e24fb4387b..2ddf9a990dbd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -610,7 +610,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
sizeof(chunk->map[0]));
if (!chunk->map) {
- kfree(chunk);
+ pcpu_mem_free(chunk, pcpu_chunk_struct_size);
return NULL;
}
diff --git a/mm/slab.c b/mm/slab.c
index 388cb1ae6fbc..19d92181ce24 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -166,7 +166,7 @@ typedef unsigned char freelist_idx_t;
typedef unsigned short freelist_idx_t;
#endif
-#define SLAB_OBJ_MAX_NUM (1 << sizeof(freelist_idx_t) * BITS_PER_BYTE)
+#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)
/*
* true if a page was allocated from pfmemalloc reserves for network-based
@@ -2572,13 +2572,13 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
return freelist;
}
-static inline freelist_idx_t get_free_obj(struct page *page, unsigned char idx)
+static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
{
return ((freelist_idx_t *)page->freelist)[idx];
}
static inline void set_free_obj(struct page *page,
- unsigned char idx, freelist_idx_t val)
+ unsigned int idx, freelist_idx_t val)
{
((freelist_idx_t *)(page->freelist))[idx] = val;
}
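
The SLAB_OBJ_MAX_NUM change caps the limit at 2^bits - 1 rather than 2^bits, presumably because 1 << (8 * sizeof(freelist_idx_t)) is not itself representable in the index type. The sketch below only shows that arithmetic wrap for a one-byte index; it does not claim to reproduce the slab internals.

    #include <stdio.h>

    typedef unsigned char freelist_idx_t;   /* one-byte index, as in the small-object case */

    int main(void)
    {
        unsigned int bits = 8 * sizeof(freelist_idx_t);
        unsigned int full = 1u << bits;               /* 256: too big for the type */
        freelist_idx_t stored = (freelist_idx_t)full; /* wraps to 0 */

        printf("1 << %u = %u, but stored in the index type it becomes %u\n",
               bits, full, (unsigned int)stored);
        printf("largest value the index type can hold: %u\n", (1u << bits) - 1);
        return 0;
    }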
diff --git a/mm/slab.h b/mm/slab.h
index 3045316b7c9d..6bd4c353704f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -91,6 +91,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
int __kmem_cache_shutdown(struct kmem_cache *);
+void slab_kmem_cache_release(struct kmem_cache *);
struct seq_file;
struct file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f3cfccf76dda..102cc6fca3d3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -323,6 +323,12 @@ static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
}
#endif /* CONFIG_MEMCG_KMEM */
+void slab_kmem_cache_release(struct kmem_cache *s)
+{
+ kfree(s->name);
+ kmem_cache_free(kmem_cache, s);
+}
+
void kmem_cache_destroy(struct kmem_cache *s)
{
get_online_cpus();
@@ -352,8 +358,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
rcu_barrier();
memcg_free_cache_params(s);
- kfree(s->name);
- kmem_cache_free(kmem_cache, s);
+#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_remove(s);
+#else
+ slab_kmem_cache_release(s);
+#endif
goto out_put_cpus;
out_unlock:
diff --git a/mm/slub.c b/mm/slub.c
index 5e234f1f8853..2b1ce697fc4b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,14 +210,11 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
-static void sysfs_slab_remove(struct kmem_cache *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s) { }
-
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
#endif
@@ -3238,24 +3235,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
int __kmem_cache_shutdown(struct kmem_cache *s)
{
- int rc = kmem_cache_close(s);
-
- if (!rc) {
- /*
- * Since slab_attr_store may take the slab_mutex, we should
- * release the lock while removing the sysfs entry in order to
- * avoid a deadlock. Because this is pretty much the last
- * operation we do and the lock will be released shortly after
- * that in slab_common.c, we could just move sysfs_slab_remove
- * to a later point in common code. We should do that when we
- * have a common sysfs framework for all allocators.
- */
- mutex_unlock(&slab_mutex);
- sysfs_slab_remove(s);
- mutex_lock(&slab_mutex);
- }
-
- return rc;
+ return kmem_cache_close(s);
}
/********************************************************************
@@ -5071,15 +5051,18 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
#ifdef CONFIG_MEMCG_KMEM
int i;
char *buffer = NULL;
+ struct kmem_cache *root_cache;
- if (!is_root_cache(s))
+ if (is_root_cache(s))
return;
+ root_cache = s->memcg_params->root_cache;
+
/*
* This mean this cache had no attribute written. Therefore, no point
* in copying default values around
*/
- if (!s->max_attr_size)
+ if (!root_cache->max_attr_size)
return;
for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
@@ -5101,7 +5084,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
*/
if (buffer)
buf = buffer;
- else if (s->max_attr_size < ARRAY_SIZE(mbuf))
+ else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
buf = mbuf;
else {
buffer = (char *) get_zeroed_page(GFP_KERNEL);
@@ -5110,7 +5093,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
buf = buffer;
}
- attr->show(s->memcg_params->root_cache, buf);
+ attr->show(root_cache, buf);
attr->store(s, buf, strlen(buf));
}
@@ -5119,6 +5102,11 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s)
#endif
}
+static void kmem_cache_release(struct kobject *k)
+{
+ slab_kmem_cache_release(to_slab(k));
+}
+
static const struct sysfs_ops slab_sysfs_ops = {
.show = slab_attr_show,
.store = slab_attr_store,
@@ -5126,6 +5114,7 @@ static const struct sysfs_ops slab_sysfs_ops = {
static struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
+ .release = kmem_cache_release,
};
static int uevent_filter(struct kset *kset, struct kobject *kobj)
@@ -5252,7 +5241,7 @@ out_put_kobj:
goto out;
}
-static void sysfs_slab_remove(struct kmem_cache *s)
+void sysfs_slab_remove(struct kmem_cache *s)
{
if (slab_state < FULL)
/*
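
The slub/slab_common hunks stop freeing the kmem_cache directly in kmem_cache_destroy() and instead let the sysfs kobject's .release callback invoke slab_kmem_cache_release(), so the memory stays valid until the last sysfs reference is dropped. A hedged userspace sketch of that "free only from the release callback" shape; the refcount and callback here are hand-rolled and single-threaded, not kobject code.

    #include <stdio.h>
    #include <stdlib.h>

    /* A reference-counted object that is only freed from its release
     * callback, mirroring how the cache above is now released from the
     * kobject's ->release instead of directly in destroy(). */
    struct obj {
        const char *name;
        int refs;
        void (*release)(struct obj *obj);
    };

    static void obj_release(struct obj *obj)
    {
        printf("releasing %s\n", obj->name);
        free(obj);
    }

    static struct obj *obj_new(const char *name)
    {
        struct obj *obj = malloc(sizeof(*obj));
        if (!obj)
            abort();
        obj->name = name;
        obj->refs = 1;
        obj->release = obj_release;
        return obj;
    }

    static void obj_get(struct obj *obj)
    {
        obj->refs++;
    }

    static void obj_put(struct obj *obj)
    {
        if (--obj->refs == 0)
            obj->release(obj);
    }

    int main(void)
    {
        struct obj *cache = obj_new("my_cache");

        obj_get(cache);   /* e.g. an open sysfs file still holds a reference */
        obj_put(cache);   /* "destroy": drop the creator's reference */
        printf("destroy returned; %s is still alive for its remaining user\n",
               cache->name);
        obj_put(cache);   /* last reference gone -> release callback runs now */
        return 0;
    }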
diff --git a/mm/truncate.c b/mm/truncate.c
index e5cc39ab0751..6a78c814bebf 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -484,14 +484,6 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
unsigned long count = 0;
int i;
- /*
- * Note: this function may get called on a shmem/tmpfs mapping:
- * pagevec_lookup() might then return 0 prematurely (because it
- * got a gangful of swap entries); but it's hardly worth worrying
- * about - it can rarely have anything to free from such a mapping
- * (most pages are dirty), and already skips over any difficulties.
- */
-
pagevec_init(&pvec, 0);
while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
diff --git a/mm/util.c b/mm/util.c
index f380af7ea779..d5ea733c5082 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -10,6 +10,7 @@
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
#include <asm/uaccess.h>
@@ -387,6 +388,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
}
EXPORT_SYMBOL(vm_mmap);
+void kvfree(const void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+EXPORT_SYMBOL(kvfree);
+
struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;
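
kvfree() is promoted here from an apparmor-local helper to a generic mm/util.c export: it frees through vfree() or kfree() depending on how the buffer was obtained, so users of a "kmalloc with vmalloc fallback" allocator need only one release path. Userspace has no is_vmalloc_addr(), so the sketch below tags each allocation explicitly instead; xvmalloc/xvfree and the header layout are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-in for an allocator pair plus a single free routine.
     * The kernel tells the two cases apart by address; here a small header
     * records which "allocator" was used.  All names are made up. */
    enum alloc_kind { KIND_SMALL, KIND_LARGE };

    struct header {
        enum alloc_kind kind;
    };

    static void *xvmalloc(size_t size)
    {
        struct header *h = malloc(sizeof(*h) + size);
        if (!h)
            return NULL;
        /* Pretend allocations above 4 KiB came from the big-buffer allocator. */
        h->kind = size > 4096 ? KIND_LARGE : KIND_SMALL;
        return h + 1;
    }

    static void xvfree(void *p)
    {
        if (!p)
            return;
        struct header *h = (struct header *)p - 1;
        /* One entry point, two underlying release paths - the shape of kvfree(). */
        printf("freeing a %s allocation\n",
               h->kind == KIND_LARGE ? "large (vmalloc-like)" : "small (kmalloc-like)");
        free(h);
    }

    int main(void)
    {
        void *a = xvmalloc(128);
        void *b = xvmalloc(1 << 20);

        xvfree(a);
        xvfree(b);
        return 0;
    }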
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f56c8deb3c0..32c661d66a45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1916,6 +1916,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
get_lru_size(lruvec, LRU_INACTIVE_FILE);
/*
+ * Prevent the reclaimer from falling into the cache trap: as
+ * cache pages start out inactive, every cache fault will tip
+ * the scan balance towards the file LRU. And as the file LRU
+ * shrinks, so does the window for rotation from references.
+ * This means we have a runaway feedback loop where a tiny
+ * thrashing file LRU becomes infinitely more attractive than
+ * anon pages. Try to detect this based on file LRU size.
+ */
+ if (global_reclaim(sc)) {
+ unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+
+ if (unlikely(file + free <= high_wmark_pages(zone))) {
+ scan_balance = SCAN_ANON;
+ goto out;
+ }
+ }
+
+ /*
* There is enough inactive page cache, do not reclaim
* anything from the anonymous working set right now.
*/
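
The vmscan hunk forces anonymous scanning when the file LRU plus free memory cannot even reach the zone's high watermark, which breaks the feedback loop described in the comment. A small arithmetic sketch of that decision with made-up page counts; the names echo the patch but the values are arbitrary.

    #include <stdbool.h>
    #include <stdio.h>

    /* Decide whether reclaim should be forced onto anonymous pages,
     * following the check added above: if the file LRU plus free pages
     * cannot reach the high watermark, file pages alone cannot satisfy
     * the allocation and scanning only them would just thrash. */
    static bool force_scan_anon(unsigned long file_lru_pages,
                                unsigned long free_pages,
                                unsigned long high_wmark_pages)
    {
        return file_lru_pages + free_pages <= high_wmark_pages;
    }

    int main(void)
    {
        unsigned long high_wmark = 16384;   /* arbitrary example watermark */

        printf("tiny file LRU:  scan anon = %d\n",
               force_scan_anon(1024, 2048, high_wmark));
        printf("plenty of file: scan anon = %d\n",
               force_scan_anon(65536, 2048, high_wmark));
        return 0;
    }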
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 175273f38cb1..44ebd5c2cd4a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
if (err < 0)
goto out_uninit_mvrp;
+ vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
err = register_netdevice(dev);
if (err < 0)
goto out_uninit_mvrp;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4181fb71ba77..ad2ac3c00398 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
}
}
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
- int subclass = 0;
-
- while (is_vlan_dev(real_dev)) {
- subclass++;
- real_dev = vlan_dev_priv(real_dev)->real_dev;
- }
-
- return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
- int err = 0, subclass;
-
- subclass = vlan_calculate_locking_subclass(to);
-
- spin_lock_nested(&to->addr_list_lock, subclass);
- err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
- if (!err)
- __dev_set_rx_mode(to);
- spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
- int err = 0, subclass;
-
- subclass = vlan_calculate_locking_subclass(to);
-
- spin_lock_nested(&to->addr_list_lock, subclass);
- err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
- if (!err)
- __dev_set_rx_mode(to);
- spin_unlock(&to->addr_list_lock);
-}
-
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
- vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
- vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
}
/*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
}
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->nest_level;
+}
+
static const struct header_ops vlan_header_ops = {
.create = vlan_dev_hard_header,
.rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- int subclass = 0;
netif_carrier_off(dev);
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- subclass = vlan_calculate_locking_subclass(dev);
- vlan_dev_set_lockdep_class(dev, subclass);
+ vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -817,6 +782,7 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
#endif
.ndo_fix_features = vlan_dev_fix_features,
+ .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
};
void vlan_setup(struct net_device *dev)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index b3bd4ec3fd94..f04224c32005 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1545,6 +1545,8 @@ out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
batadv_orig_node_free_ref(orig_neigh_node);
out:
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_free_ref(router_ifinfo);
if (router)
batadv_neigh_node_free_ref(router);
if (router_router)
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 60889df808f3..dcd99b2bea3c 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -941,8 +941,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
* additional DAT answer may trigger kernel warnings about
* a packet coming from the wrong port.
*/
- if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
- BATADV_NO_FLAGS)) {
+ if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) {
ret = true;
goto out;
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index bcc4bea632fa..f14e54a05691 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -418,12 +418,13 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node)
{
struct batadv_priv *bat_priv;
- struct batadv_hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if = NULL;
struct batadv_frag_packet frag_header;
struct sk_buff *skb_fragment;
unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
unsigned header_size = sizeof(frag_header);
unsigned max_fragment_size, max_packet_size;
+ bool ret = false;
/* To avoid merge and refragmentation at next-hops we never send
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
@@ -483,7 +484,11 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
skb->len + ETH_HLEN);
batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
- return true;
+ ret = true;
+
out_err:
- return false;
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
+
+ return ret;
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index c835e137423b..90cff585b37d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -42,8 +42,10 @@
static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
{
- if (atomic_dec_and_test(&gw_node->refcount))
+ if (atomic_dec_and_test(&gw_node->refcount)) {
+ batadv_orig_node_free_ref(gw_node->orig_node);
kfree_rcu(gw_node, rcu);
+ }
}
static struct batadv_gw_node *
@@ -406,9 +408,14 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
if (gateway->bandwidth_down == 0)
return;
+ if (!atomic_inc_not_zero(&orig_node->refcount))
+ return;
+
gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
- if (!gw_node)
+ if (!gw_node) {
+ batadv_orig_node_free_ref(orig_node);
return;
+ }
INIT_HLIST_NODE(&gw_node->list);
gw_node->orig_node = orig_node;
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b851cc580853..fbda6b54baff 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -83,7 +83,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
return true;
/* no more parents..stop recursion */
- if (net_dev->iflink == net_dev->ifindex)
+ if (net_dev->iflink == 0 || net_dev->iflink == net_dev->ifindex)
return false;
/* recurse over the parent device */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ffd9dfbd9b0e..6a484514cd3e 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -501,12 +501,17 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
struct batadv_orig_ifinfo *orig_ifinfo;
+ struct batadv_neigh_node *router;
orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
+ /* this is the last reference to this object */
+ router = rcu_dereference_protected(orig_ifinfo->router, true);
+ if (router)
+ batadv_neigh_node_free_ref_now(router);
kfree(orig_ifinfo);
}
@@ -702,6 +707,47 @@ free_orig_node:
}
/**
+ * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
+ * @bat_priv: the bat priv with all the soft interface information
+ * @neigh: the neighbor node which is to be checked
+ */
+static void
+batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
+ struct batadv_neigh_node *neigh)
+{
+ struct batadv_neigh_ifinfo *neigh_ifinfo;
+ struct batadv_hard_iface *if_outgoing;
+ struct hlist_node *node_tmp;
+
+ spin_lock_bh(&neigh->ifinfo_lock);
+
+ /* for all ifinfo objects for this neighbor */
+ hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ &neigh->ifinfo_list, list) {
+ if_outgoing = neigh_ifinfo->if_outgoing;
+
+ /* always keep the default interface */
+ if (if_outgoing == BATADV_IF_DEFAULT)
+ continue;
+
+ /* don't purge if the interface is not (going) down */
+ if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
+ (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
+ (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
+ continue;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
+ neigh->addr, if_outgoing->net_dev->name);
+
+ hlist_del_rcu(&neigh_ifinfo->list);
+ batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+ }
+
+ spin_unlock_bh(&neigh->ifinfo_lock);
+}
+
+/**
* batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: orig node which is to be checked
@@ -800,6 +846,11 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
hlist_del_rcu(&neigh_node->list);
batadv_neigh_node_free_ref(neigh_node);
+ } else {
+ /* only necessary if the whole neighbor is not being
+ * deleted, but some interface has been removed.
+ */
+ batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
}
}
@@ -857,7 +908,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
{
struct batadv_neigh_node *best_neigh_node;
struct batadv_hard_iface *hard_iface;
- bool changed;
+ bool changed_ifinfo, changed_neigh;
if (batadv_has_timed_out(orig_node->last_seen,
2 * BATADV_PURGE_TIMEOUT)) {
@@ -867,10 +918,10 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
jiffies_to_msecs(orig_node->last_seen));
return true;
}
- changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
- changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);
+ changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
+ changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
- if (!changed)
+ if (!changed_ifinfo && !changed_neigh)
return false;
/* first for NULL ... */
@@ -1028,7 +1079,8 @@ int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
out:
- batadv_hardif_free_ref(hard_iface);
+ if (hard_iface)
+ batadv_hardif_free_ref(hard_iface);
return 0;
}
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dac7f9b98687..1948d592aa54 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
return r;
}
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, bool more)
{
int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
return ret;
}
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+ int offset, size_t size, bool more)
+{
+ int ret;
+ struct kvec iov;
+
+ /* sendpage cannot properly handle pages with page_count == 0,
+ * we need to fall back to sendmsg if that's the case */
+ if (page_count(page) >= 1)
+ return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+ iov.iov_base = kmap(page) + offset;
+ iov.iov_len = size;
+ ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+ kunmap(page);
+
+ return ret;
+}
/*
* Shutdown/close the socket for the given connection.
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 8b8a5a24b223..c547e46084d3 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -329,6 +329,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
dout("crush decode tunable chooseleaf_descend_once = %d",
c->chooseleaf_descend_once);
+ ceph_decode_need(p, end, sizeof(u8), done);
+ c->chooseleaf_vary_r = ceph_decode_8(p);
+ dout("crush decode tunable chooseleaf_vary_r = %d",
+ c->chooseleaf_vary_r);
+
done:
dout("crush_decode success\n");
return c;
diff --git a/net/core/dev.c b/net/core/dev.c
index 867adb25b5b8..0355ca5d2924 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3957,6 +3957,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
}
NAPI_GRO_CB(skb)->count = 1;
NAPI_GRO_CB(skb)->age = jiffies;
+ NAPI_GRO_CB(skb)->last = skb;
skb_shinfo(skb)->gso_size = skb_gro_len(skb);
skb->next = napi->gro_list;
napi->gro_list = skb;
@@ -4547,6 +4548,32 @@ void *netdev_adjacent_get_private(struct list_head *adj_list)
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
+ * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
+{
+ struct netdev_adjacent *upper;
+
+ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
+
+ upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+ if (&upper->list == &dev->adj_list.upper)
+ return NULL;
+
+ *iter = &upper->list;
+
+ return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
+/**
* netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
* @dev: device
* @iter: list_head ** of the current position
@@ -4628,6 +4655,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ * list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+ struct netdev_adjacent *lower;
+
+ lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+ if (&lower->list == &dev->adj_list.lower)
+ return NULL;
+
+ *iter = &lower->list;
+
+ return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
+/**
* netdev_lower_get_first_private_rcu - Get the first ->private from the
* lower neighbour list, RCU
* variant
@@ -5077,6 +5130,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);
+
+int dev_get_nest_level(struct net_device *dev,
+ bool (*type_check)(struct net_device *dev))
+{
+ struct net_device *lower = NULL;
+ struct list_head *iter;
+ int max_nest = -1;
+ int nest;
+
+ ASSERT_RTNL();
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ nest = dev_get_nest_level(lower, type_check);
+ if (max_nest < nest)
+ max_nest = nest;
+ }
+
+ if (type_check(dev))
+ max_nest++;
+
+ return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -5242,7 +5319,6 @@ void __dev_set_rx_mode(struct net_device *dev)
if (ops->ndo_set_rx_mode)
ops->ndo_set_rx_mode(dev);
}
-EXPORT_SYMBOL(__dev_set_rx_mode);
void dev_set_rx_mode(struct net_device *dev)
{
@@ -5547,7 +5623,7 @@ static int dev_new_index(struct net *net)
/* Delayed registration/unregisteration */
static LIST_HEAD(net_todo_list);
-static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
+DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
static void net_set_todo(struct net_device *dev)
{
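
dev_get_nest_level() above walks all lower devices recursively and reports how many levels of the given device type are stacked; register_vlan_dev() then records nest_level as that value plus one, and the lockdep subclass is read from the stored value instead of being recomputed under the address-list lock. A hedged userspace sketch of the recursive depth computation over a tiny device tree; struct dev and its fixed lower-device array are invented for the illustration.

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal stand-in for a stacked network device: a fixed array of
     * lower devices and a flag saying whether it is a VLAN.  The real
     * dev_get_nest_level() does the same walk over adjacency lists. */
    #define MAX_LOWER 4

    struct dev {
        const char *name;
        bool is_vlan;
        struct dev *lower[MAX_LOWER];
        int n_lower;
    };

    static int nest_level(const struct dev *dev)
    {
        int max_nest = -1;

        for (int i = 0; i < dev->n_lower; i++) {
            int nest = nest_level(dev->lower[i]);
            if (nest > max_nest)
                max_nest = nest;
        }
        if (dev->is_vlan)
            max_nest++;
        return max_nest;
    }

    int main(void)
    {
        struct dev eth0  = { .name = "eth0",        .is_vlan = false };
        struct dev vlan1 = { .name = "eth0.100",    .is_vlan = true,
                             .lower = { &eth0 },  .n_lower = 1 };
        struct dev vlan2 = { .name = "eth0.100.10", .is_vlan = true,
                             .lower = { &vlan1 }, .n_lower = 1 };

        /* eth0 is not a VLAN: -1.  The first VLAN on top of it: 0.
         * A VLAN stacked on that VLAN: 1.  register_vlan_dev() stores
         * nest_level(real_dev) + 1 for the new device, and that value
         * later feeds the lockdep subclass. */
        printf("%s: nest level %d\n", eth0.name,  nest_level(&eth0));
        printf("%s: nest level %d\n", vlan1.name, nest_level(&vlan1));
        printf("%s: nest level %d\n", vlan2.name, nest_level(&vlan2));
        return 0;
    }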
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8f8a96ef9f3f..32d872eec7f5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1248,8 +1248,8 @@ void __neigh_set_probe_once(struct neighbour *neigh)
neigh->updated = jiffies;
if (!(neigh->nud_state & NUD_FAILED))
return;
- neigh->nud_state = NUD_PROBE;
- atomic_set(&neigh->probes, NEIGH_VAR(neigh->parms, UCAST_PROBES));
+ neigh->nud_state = NUD_INCOMPLETE;
+ atomic_set(&neigh->probes, neigh_max_probes(neigh));
neigh_add_timer(neigh,
jiffies + NEIGH_VAR(neigh->parms, RETRANS_TIME));
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 05e949d48204..85b62691f4f2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -24,7 +24,7 @@
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
-static DEFINE_MUTEX(net_mutex);
+DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d6417464dc66..f31268dbc0d1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -353,15 +353,46 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+/* Return with the rtnl_lock held when there are no network
+ * devices unregistering in any network namespace.
+ */
+static void rtnl_lock_unregistering_all(void)
+{
+ struct net *net;
+ bool unregistering;
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(&netdev_unregistering_wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ unregistering = false;
+ rtnl_lock();
+ for_each_net(net) {
+ if (net->dev_unreg_count > 0) {
+ unregistering = true;
+ break;
+ }
+ }
+ if (!unregistering)
+ break;
+ __rtnl_unlock();
+ schedule();
+ }
+ finish_wait(&netdev_unregistering_wq, &wait);
+}
+
/**
* rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
* @ops: struct rtnl_link_ops * to unregister
*/
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
- rtnl_lock();
+ /* Close the race with cleanup_net() */
+ mutex_lock(&net_mutex);
+ rtnl_lock_unregistering_all();
__rtnl_link_unregister(ops);
rtnl_unlock();
+ mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3d74530ae82b..3f6c7e8be8a4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3076,7 +3076,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
if (unlikely(p->len + len >= 65536))
return -E2BIG;
- lp = NAPI_GRO_CB(p)->last ?: p;
+ lp = NAPI_GRO_CB(p)->last;
pinfo = skb_shinfo(lp);
if (headlen <= offset) {
@@ -3192,7 +3192,7 @@ merge:
__skb_pull(skb, offset);
- if (!NAPI_GRO_CB(p)->last)
+ if (NAPI_GRO_CB(p)->last == p)
skb_shinfo(p)->frag_list = skb;
else
NAPI_GRO_CB(p)->last->next = skb;
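
The GRO hunks initialize NAPI_GRO_CB(skb)->last to the skb itself when it is first queued, so appending later segments no longer needs a "last ?: p" fallback and "is this the first chained segment?" becomes a simple last == p test. A small sketch of the same tail-pointer idiom on a plain linked structure; the field names are illustrative, not sk_buff internals.

    #include <stdio.h>
    #include <stdlib.h>

    /* A head segment whose 'last' pointer starts out pointing at the head
     * itself, the idiom the GRO change above switches to. */
    struct seg {
        int len;
        struct seg *chain;  /* head only: first chained segment (like frag_list) */
        struct seg *next;   /* chained segments: next one in the chain */
        struct seg *last;   /* head only: current tail, initialized to the head */
    };

    static struct seg *seg_new(int len)
    {
        struct seg *s = calloc(1, sizeof(*s));
        if (!s)
            abort();
        s->len = len;
        s->last = s;        /* as NAPI_GRO_CB(skb)->last is set on the first skb */
        return s;
    }

    static void seg_append(struct seg *head, struct seg *s)
    {
        if (head->last == head)
            head->chain = s;        /* first chained segment */
        else
            head->last->next = s;   /* append behind the current tail */
        head->last = s;
    }

    int main(void)
    {
        struct seg *head = seg_new(100);
        int total;

        seg_append(head, seg_new(200));
        seg_append(head, seg_new(300));

        total = head->len;
        printf("head %d, chained:", head->len);
        for (struct seg *s = head->chain; s; s = s->next) {
            printf(" %d", s->len);
            total += s->len;
        }
        printf(" -> total %d\n", total);
        return 0;
    }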
diff --git a/net/core/utils.c b/net/core/utils.c
index 2f737bf90b3f..eed34338736c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,8 +348,8 @@ static void __net_random_once_deferred(struct work_struct *w)
{
struct __net_random_once_work *work =
container_of(w, struct __net_random_once_work, work);
- if (!static_key_enabled(work->key))
- static_key_slow_inc(work->key);
+ BUG_ON(!static_key_enabled(work->key));
+ static_key_slow_dec(work->key);
kfree(work);
}
@@ -367,7 +367,7 @@ static void __net_random_once_disable_jump(struct static_key *key)
}
bool __net_get_random_once(void *buf, int nbytes, bool *done,
- struct static_key *done_key)
+ struct static_key *once_key)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
@@ -382,7 +382,7 @@ bool __net_get_random_once(void *buf, int nbytes, bool *done,
*done = true;
spin_unlock_irqrestore(&lock, flags);
- __net_random_once_disable_jump(done_key);
+ __net_random_once_disable_jump(once_key);
return true;
}
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0eb5d5e76dfb..5db37cef50a9 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -406,8 +406,9 @@ static int dsa_of_probe(struct platform_device *pdev)
goto out_free;
}
- chip_index = 0;
+ chip_index = -1;
for_each_available_child_of_node(np, child) {
+ chip_index++;
cd = &pd->chip[chip_index];
cd->mii_bus = &mdio_bus->dev;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 289c6ee388c1..86a00bd6684c 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -539,9 +539,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
unsigned int max_headroom; /* The extra header space needed */
__be32 dst;
int err;
- bool connected = true;
+ bool connected;
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ connected = (tunnel->parms.iph.daddr != 0);
dst = tnl_params->daddr;
if (dst == 0) {
@@ -879,6 +880,7 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
*/
if (!IS_ERR(itn->fb_tunnel_dev)) {
itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev);
ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
}
rtnl_unlock();
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index afcee51b90ed..13ef00f1e17b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -239,6 +239,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
static int vti4_err(struct sk_buff *skb, u32 info)
{
__be32 spi;
+ __u32 mark;
struct xfrm_state *x;
struct ip_tunnel *tunnel;
struct ip_esp_hdr *esph;
@@ -254,6 +255,8 @@ static int vti4_err(struct sk_buff *skb, u32 info)
if (!tunnel)
return -1;
+ mark = be32_to_cpu(tunnel->parms.o_key);
+
switch (protocol) {
case IPPROTO_ESP:
esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
@@ -281,7 +284,7 @@ static int vti4_err(struct sk_buff *skb, u32 info)
return 0;
}
- x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+ x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
spi, protocol, AF_INET);
if (!x)
return 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 50e1e0feddfc..4154eb76b0ad 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1526,7 +1526,7 @@ static int __mkroute_input(struct sk_buff *skb,
struct in_device *out_dev;
unsigned int flags = 0;
bool do_cache;
- u32 itag;
+ u32 itag = 0;
/* get a working reference to the output device */
out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 8e8c018d9d2d..d5f6bd9a210a 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -62,10 +62,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
if (err)
return err;
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
-
- skb->protocol = htons(ETH_P_IP);
+ IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
return x->outer_mode->output2(x, skb);
}
@@ -73,27 +70,34 @@ EXPORT_SYMBOL(xfrm4_prepare_output);
int xfrm4_output_finish(struct sk_buff *skb)
{
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ skb->protocol = htons(ETH_P_IP);
+
+#ifdef CONFIG_NETFILTER
+ IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+#endif
+
+ return xfrm_output(skb);
+}
+
+static int __xfrm4_output(struct sk_buff *skb)
+{
+ struct xfrm_state *x = skb_dst(skb)->xfrm;
+
#ifdef CONFIG_NETFILTER
- if (!skb_dst(skb)->xfrm) {
+ if (!x) {
IPCB(skb)->flags |= IPSKB_REROUTED;
return dst_output(skb);
}
-
- IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
#endif
- skb->protocol = htons(ETH_P_IP);
- return xfrm_output(skb);
+ return x->outer_mode->afinfo->output_finish(skb);
}
int xfrm4_output(struct sock *sk, struct sk_buff *skb)
{
- struct dst_entry *dst = skb_dst(skb);
- struct xfrm_state *x = dst->xfrm;
-
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
- NULL, dst->dev,
- x->outer_mode->afinfo->output_finish,
+ NULL, skb_dst(skb)->dev, __xfrm4_output,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 7f7b243e8139..a2ce0101eaac 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -50,8 +50,12 @@ int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
{
int ret;
struct xfrm4_protocol *handler;
+ struct xfrm4_protocol __rcu **head = proto_handlers(protocol);
- for_each_protocol_rcu(*proto_handlers(protocol), handler)
+ if (!head)
+ return 0;
+
+ for_each_protocol_rcu(*head, handler)
if ((ret = handler->cb_handler(skb, err)) <= 0)
return ret;
@@ -64,15 +68,20 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
{
int ret;
struct xfrm4_protocol *handler;
+ struct xfrm4_protocol __rcu **head = proto_handlers(nexthdr);
XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
XFRM_SPI_SKB_CB(skb)->family = AF_INET;
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
- for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+ if (!head)
+ goto out;
+
+ for_each_protocol_rcu(*head, handler)
if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
return ret;
+out:
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
kfree_skb(skb);
@@ -208,6 +217,9 @@ int xfrm4_protocol_register(struct xfrm4_protocol *handler,
int ret = -EEXIST;
int priority = handler->priority;
+ if (!proto_handlers(protocol) || !netproto(protocol))
+ return -EINVAL;
+
mutex_lock(&xfrm4_protocol_mutex);
if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -250,6 +262,9 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
struct xfrm4_protocol *t;
int ret = -ENOENT;
+ if (!proto_handlers(protocol) || !netproto(protocol))
+ return -EINVAL;
+
mutex_lock(&xfrm4_protocol_mutex);
for (pprev = proto_handlers(protocol);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 59f95affceb0..b2f091566f88 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -196,7 +196,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
unsigned int off;
u16 flush = 1;
int proto;
- __wsum csum;
off = skb_gro_offset(skb);
hlen = off + sizeof(*iph);
@@ -264,13 +263,10 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
NAPI_GRO_CB(skb)->flush |= flush;
- csum = skb->csum;
- skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));
+ skb_gro_postpull_rcsum(skb, iph, nlen);
pp = ops->callbacks.gro_receive(head, skb);
- skb->csum = csum;
-
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ab0cc57f779c..85aaeca1f7f3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1229,7 +1229,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
unsigned int maxnonfragsize, headersize;
headersize = sizeof(struct ipv6hdr) +
- (opt ? opt->tot_len : 0) +
+ (opt ? opt->opt_flen + opt->opt_nflen : 0) +
(dst_allfrag(&rt->dst) ?
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index fe61545dde71..afa082458360 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1558,7 +1558,7 @@ static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[])
{
u8 proto;
- if (!data)
+ if (!data || !data[IFLA_IPTUN_PROTO])
return 0;
proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 2953c0c26c27..9aaa6bb229e4 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -511,6 +511,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
__be32 spi;
+ __u32 mark;
struct xfrm_state *x;
struct ip6_tnl *t;
struct ip_esp_hdr *esph;
@@ -524,6 +525,8 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!t)
return -1;
+ mark = be32_to_cpu(t->parms.o_key);
+
switch (protocol) {
case IPPROTO_ESP:
esph = (struct ip_esp_hdr *)(skb->data + offset);
@@ -545,7 +548,7 @@ static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
type != NDISC_REDIRECT)
return 0;
- x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+ x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
spi, protocol, AF_INET6);
if (!x)
return 0;
@@ -1094,7 +1097,6 @@ static int __init vti6_tunnel_init(void)
err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
if (err < 0) {
- unregister_pernet_device(&vti6_net_ops);
pr_err("%s: can't register vti6 protocol\n", __func__);
goto out;
@@ -1103,7 +1105,6 @@ static int __init vti6_tunnel_init(void)
err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
if (err < 0) {
xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
- unregister_pernet_device(&vti6_net_ops);
pr_err("%s: can't register vti6 protocol\n", __func__);
goto out;
@@ -1113,7 +1114,6 @@ static int __init vti6_tunnel_init(void)
if (err < 0) {
xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
- unregister_pernet_device(&vti6_net_ops);
pr_err("%s: can't register vti6 protocol\n", __func__);
goto out;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 09a22f4f36c9..ca8d4ea48a5d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -851,7 +851,7 @@ out:
static void ndisc_recv_na(struct sk_buff *skb)
{
struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
- const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+ struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
u8 *lladdr = NULL;
u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) +
@@ -944,10 +944,7 @@ static void ndisc_recv_na(struct sk_buff *skb)
/*
* Change: router to host
*/
- struct rt6_info *rt;
- rt = rt6_get_dflt_router(saddr, dev);
- if (rt)
- ip6_del_rt(rt);
+ rt6_clean_tohost(dev_net(dev), saddr);
}
out:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index aa883afa652d..f23fbd28a501 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2234,6 +2234,27 @@ void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
+#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
+#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
+
+/* Remove routers and update dst entries when a gateway turns into a host. */
+static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
+{
+ struct in6_addr *gateway = (struct in6_addr *)arg;
+
+ if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
+ ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
+ ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
+ return -1;
+ }
+ return 0;
+}
+
+void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
+{
+ fib6_clean_all(net, fib6_clean_tohost, gateway);
+}
+
struct arg_dev_net {
struct net_device *dev;
struct net *net;
@@ -2709,6 +2730,9 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
if (tb[RTA_OIF])
oif = nla_get_u32(tb[RTA_OIF]);
+ if (tb[RTA_MARK])
+ fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
+
if (iif) {
struct net_device *dev;
int flags = 0;
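
fib6_clean_tohost() above matches routes whose flags contain every bit of RTF_RA_ROUTER or every bit of RTF_CACHE_GATEWAY; the `(flags & MASK) == MASK` form tests "all bits set", not "any bit set". A two-case userspace illustration of that distinction with arbitrary bit values.

    #include <stdio.h>

    #define FLAG_ADDRCONF 0x1u
    #define FLAG_DEFAULT  0x2u
    #define FLAG_GATEWAY  0x4u
    #define MASK_RA_ROUTER (FLAG_ADDRCONF | FLAG_DEFAULT | FLAG_GATEWAY)

    int main(void)
    {
        unsigned int flags = FLAG_DEFAULT | FLAG_GATEWAY;   /* ADDRCONF missing */

        /* "any of the bits" vs "all of the bits": the route-cleaning
         * predicate above requires the latter. */
        printf("any bit set:  %d\n", (flags & MASK_RA_ROUTER) != 0);
        printf("all bits set: %d\n", (flags & MASK_RA_ROUTER) == MASK_RA_ROUTER);
        return 0;
    }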
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 0d78132ff18a..8517d3cd1aed 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -42,7 +42,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
if (NAPI_GRO_CB(skb)->flush)
goto skip_csum;
- wsum = skb->csum;
+ wsum = NAPI_GRO_CB(skb)->csum;
switch (skb->ip_summed) {
case CHECKSUM_NONE:
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index f47c8b153dd3..433672d07d0b 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -114,12 +114,6 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
if (err)
return err;
- memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
- IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
-
- skb->protocol = htons(ETH_P_IPV6);
skb->ignore_df = 1;
return x->outer_mode->output2(x, skb);
@@ -128,11 +122,13 @@ EXPORT_SYMBOL(xfrm6_prepare_output);
int xfrm6_output_finish(struct sk_buff *skb)
{
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ skb->protocol = htons(ETH_P_IPV6);
+
#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
#endif
- skb->protocol = htons(ETH_P_IPV6);
return xfrm_output(skb);
}
@@ -142,6 +138,13 @@ static int __xfrm6_output(struct sk_buff *skb)
struct xfrm_state *x = dst->xfrm;
int mtu;
+#ifdef CONFIG_NETFILTER
+ if (!x) {
+ IP6CB(skb)->flags |= IP6SKB_REROUTED;
+ return dst_output(skb);
+ }
+#endif
+
if (skb->protocol == htons(ETH_P_IPV6))
mtu = ip6_skb_dst_mtu(skb);
else
@@ -165,6 +168,7 @@ static int __xfrm6_output(struct sk_buff *skb)
int xfrm6_output(struct sock *sk, struct sk_buff *skb)
{
- return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
- skb_dst(skb)->dev, __xfrm6_output);
+ return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
+ NULL, skb_dst(skb)->dev, __xfrm6_output,
+ !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index 6ab989c486f7..54d13f8dbbae 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -50,6 +50,10 @@ int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
{
int ret;
struct xfrm6_protocol *handler;
+ struct xfrm6_protocol __rcu **head = proto_handlers(protocol);
+
+ if (!head)
+ return 0;
for_each_protocol_rcu(*proto_handlers(protocol), handler)
if ((ret = handler->cb_handler(skb, err)) <= 0)
@@ -184,10 +188,12 @@ int xfrm6_protocol_register(struct xfrm6_protocol *handler,
struct xfrm6_protocol __rcu **pprev;
struct xfrm6_protocol *t;
bool add_netproto = false;
-
int ret = -EEXIST;
int priority = handler->priority;
+ if (!proto_handlers(protocol) || !netproto(protocol))
+ return -EINVAL;
+
mutex_lock(&xfrm6_protocol_mutex);
if (!rcu_dereference_protected(*proto_handlers(protocol),
@@ -230,6 +236,9 @@ int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
struct xfrm6_protocol *t;
int ret = -ENOENT;
+ if (!proto_handlers(protocol) || !netproto(protocol))
+ return -EINVAL;
+
mutex_lock(&xfrm6_protocol_mutex);
for (pprev = proto_handlers(protocol);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 01e77b0ae075..8c9d7302c846 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_lock_irqsave(&list->lock, flags);
while (list_skb != (struct sk_buff *)list) {
- if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
this = list_skb;
break;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b455f62d357a..487c2ef0982a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ struct ieee80211_roc_work {
bool started, abort, hw_begun, notified;
bool to_be_freed;
+ bool on_channel;
unsigned long hw_start_time;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 488826f188a7..bfb5e20796b6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3598,18 +3598,24 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
sdata_lock(sdata);
- if (ifmgd->auth_data) {
+ if (ifmgd->auth_data || ifmgd->assoc_data) {
+ const u8 *bssid = ifmgd->auth_data ?
+ ifmgd->auth_data->bss->bssid :
+ ifmgd->assoc_data->bss->bssid;
+
/*
- * If we are trying to authenticate while suspending, cfg80211
- * won't know and won't actually abort those attempts, thus we
- * need to do that ourselves.
+ * If we are trying to authenticate / associate while suspending,
+ * cfg80211 won't know and won't actually abort those attempts,
+ * thus we need to do that ourselves.
*/
- ieee80211_send_deauth_disassoc(sdata,
- ifmgd->auth_data->bss->bssid,
+ ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DEAUTH_LEAVING,
false, frame_buf);
- ieee80211_destroy_auth_data(sdata, false);
+ if (ifmgd->assoc_data)
+ ieee80211_destroy_assoc_data(sdata, false);
+ if (ifmgd->auth_data)
+ ieee80211_destroy_auth_data(sdata, false);
cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
IEEE80211_DEAUTH_FRAME_LEN);
}
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 6fb38558a5e6..7a17decd27f9 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -333,7 +333,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)
container_of(work, struct ieee80211_roc_work, work.work);
struct ieee80211_sub_if_data *sdata = roc->sdata;
struct ieee80211_local *local = sdata->local;
- bool started;
+ bool started, on_channel;
mutex_lock(&local->mtx);
@@ -354,14 +354,26 @@ void ieee80211_sw_roc_work(struct work_struct *work)
if (!roc->started) {
struct ieee80211_roc_work *dep;
- /* start this ROC */
- ieee80211_offchannel_stop_vifs(local);
+ WARN_ON(local->use_chanctx);
+
+ /* If actually operating on the desired channel (with at least
+ * 20 MHz channel width) don't stop all the operations but still
+ * treat it as though the ROC operation started properly, so
+ * other ROC operations won't interfere with this one.
+ */
+ roc->on_channel = roc->chan == local->_oper_chandef.chan &&
+ local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
+ local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;
- /* switch channel etc */
+ /* start this ROC */
ieee80211_recalc_idle(local);
- local->tmp_channel = roc->chan;
- ieee80211_hw_config(local, 0);
+ if (!roc->on_channel) {
+ ieee80211_offchannel_stop_vifs(local);
+
+ local->tmp_channel = roc->chan;
+ ieee80211_hw_config(local, 0);
+ }
/* tell userspace or send frame */
ieee80211_handle_roc_started(roc);
@@ -380,9 +392,10 @@ void ieee80211_sw_roc_work(struct work_struct *work)
finish:
list_del(&roc->list);
started = roc->started;
+ on_channel = roc->on_channel;
ieee80211_roc_notify_destroy(roc, !roc->abort);
- if (started) {
+ if (started && !on_channel) {
ieee80211_flush_queues(local, NULL);
local->tmp_channel = NULL;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index a0b0aea76525..cec5b60487a4 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -21,10 +21,10 @@
#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
__field(bool, p2p) \
- __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+ __string(vif_name, sdata->name)
#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
__entry->p2p = sdata->vif.p2p; \
- __assign_str(vif_name, sdata->dev ? sdata->dev->name : sdata->name)
+ __assign_str(vif_name, sdata->name)
#define VIF_PR_FMT " vif:%s(%d%s)"
#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type, __entry->p2p ? "/p2p" : ""
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index e9e36a256165..9265adfdabfc 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -129,9 +129,12 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
if (!vht_cap_ie || !sband->vht_cap.vht_supported)
return;
- /* A VHT STA must support 40 MHz */
- if (!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
- return;
+ /*
+ * A VHT STA must support 40 MHz, but if we verify that here
+ * then we break a few things - some APs (e.g. Netgear R6300v2
+ * and others based on the BCM4360 chipset) will unset this
+ * capability bit when operating in 20 MHz.
+ */
vht_cap->vht_supported = true;
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 804105391b9a..345acfb1720b 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -66,20 +66,6 @@ struct nft_jumpstack {
int rulenum;
};
-static inline void
-nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
- struct nft_jumpstack *jumpstack, unsigned int stackptr)
-{
- struct nft_stats __percpu *stats;
- const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
-
- rcu_read_lock_bh();
- stats = rcu_dereference(nft_base_chain(chain)->stats);
- __this_cpu_inc(stats->pkts);
- __this_cpu_add(stats->bytes, pkt->skb->len);
- rcu_read_unlock_bh();
-}
-
enum nft_trace {
NFT_TRACE_RULE,
NFT_TRACE_RETURN,
@@ -117,13 +103,14 @@ static void nft_trace_packet(const struct nft_pktinfo *pkt,
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
{
- const struct nft_chain *chain = ops->priv;
+ const struct nft_chain *chain = ops->priv, *basechain = chain;
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
struct nft_data data[NFT_REG_MAX + 1];
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
- int rulenum = 0;
+ struct nft_stats __percpu *stats;
+ int rulenum;
/*
* Cache cursor to avoid problems in case that the cursor is updated
* while traversing the ruleset.
@@ -131,6 +118,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
do_chain:
+ rulenum = 0;
rule = list_entry(&chain->rules, struct nft_rule, list);
next_rule:
data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
@@ -156,8 +144,10 @@ next_rule:
switch (data[NFT_REG_VERDICT].verdict) {
case NFT_BREAK:
data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
- /* fall through */
+ continue;
case NFT_CONTINUE:
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
continue;
}
break;
@@ -183,37 +173,44 @@ next_rule:
jumpstack[stackptr].rule = rule;
jumpstack[stackptr].rulenum = rulenum;
stackptr++;
- /* fall through */
+ chain = data[NFT_REG_VERDICT].chain;
+ goto do_chain;
case NFT_GOTO:
+ if (unlikely(pkt->skb->nf_trace))
+ nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
chain = data[NFT_REG_VERDICT].chain;
goto do_chain;
case NFT_RETURN:
if (unlikely(pkt->skb->nf_trace))
nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
-
- /* fall through */
+ break;
case NFT_CONTINUE:
+ if (unlikely(pkt->skb->nf_trace && !(chain->flags & NFT_BASE_CHAIN)))
+ nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
break;
default:
WARN_ON(1);
}
if (stackptr > 0) {
- if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
-
stackptr--;
chain = jumpstack[stackptr].chain;
rule = jumpstack[stackptr].rule;
rulenum = jumpstack[stackptr].rulenum;
goto next_rule;
}
- nft_chain_stats(chain, pkt, jumpstack, stackptr);
if (unlikely(pkt->skb->nf_trace))
- nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+ nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
+
+ rcu_read_lock_bh();
+ stats = rcu_dereference(nft_base_chain(basechain)->stats);
+ __this_cpu_inc(stats->pkts);
+ __this_cpu_add(stats->bytes, pkt->skb->len);
+ rcu_read_unlock_bh();
- return nft_base_chain(chain)->policy;
+ return nft_base_chain(basechain)->policy;
}
EXPORT_SYMBOL_GPL(nft_do_chain);
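
The nft_do_chain() rewrite keeps a separate pointer to the base chain, resets the rule counter on every jump, pushes the return point onto an explicit jump stack for NFT_JUMP, and charges the packet/byte counters against the base chain exactly once when the walk ends. A compact userspace sketch of an explicit-jump-stack rule walker with the verdict set stripped down to the bare minimum; all names, the rule layout, and the "drop on stack exhaustion" policy are invented for the example.

    #include <stdio.h>

    /* A toy rule engine with CONTINUE / JUMP / ACCEPT verdicts and an
     * explicit jump stack - the control shape of nft_do_chain() above,
     * minus GOTO, RETURN, tracing and statistics. */
    enum verdict { V_CONTINUE, V_JUMP, V_ACCEPT };

    struct chain;

    struct rule {
        enum verdict verdict;
        const struct chain *target;     /* used by V_JUMP */
        const char *name;
    };

    struct chain {
        const char *name;
        const struct rule *rules;
        int n_rules;
    };

    #define JUMP_STACK_SIZE 4

    static const char *run(const struct chain *basechain)
    {
        struct { const struct chain *chain; int rulenum; } stack[JUMP_STACK_SIZE];
        const struct chain *chain = basechain;
        int stackptr = 0;
        int rulenum;

    do_chain:
        rulenum = 0;                    /* reset on every (re)entered chain */
    next_rule:
        for (; rulenum < chain->n_rules; rulenum++) {
            const struct rule *rule = &chain->rules[rulenum];

            printf("chain %s rule %d (%s)\n", chain->name, rulenum, rule->name);
            switch (rule->verdict) {
            case V_CONTINUE:
                continue;
            case V_ACCEPT:
                return "accept";
            case V_JUMP:
                if (stackptr >= JUMP_STACK_SIZE)
                    return "drop";              /* jump stack exhausted */
                stack[stackptr].chain = chain;
                stack[stackptr].rulenum = rulenum + 1;
                stackptr++;
                chain = rule->target;
                goto do_chain;
            }
        }
        if (stackptr > 0) {             /* fell off the end of a jumped-to chain */
            stackptr--;
            chain = stack[stackptr].chain;
            rulenum = stack[stackptr].rulenum;
            goto next_rule;
        }
        /* end of the base chain: apply its policy (and, in the kernel,
         * bump the base chain's packet/byte counters exactly once here) */
        return "base-chain policy";
    }

    int main(void)
    {
        static const struct rule sub_rules[] = {
            { V_CONTINUE, NULL, "log" },
        };
        static const struct chain subchain = { "subchain", sub_rules, 1 };
        static const struct rule base_rules[] = {
            { V_JUMP, &subchain, "jump subchain" },
            { V_CONTINUE, NULL, "count" },
        };
        static const struct chain base = { "base", base_rules, 2 };

        printf("final verdict: %s\n", run(&base));
        return 0;
    }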
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 7633a752c65e..0ad080790a32 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -99,7 +99,7 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
_debug("tktlen: %x", tktlen);
if (tktlen > AFSTOKEN_RK_TIX_MAX)
return -EKEYREJECTED;
- if (8 * 4 + tktlen != toklen)
+ if (toklen < 8 * 4 + tktlen)
return -EKEYREJECTED;
plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index d11d0a4fbe34..c721cd4a469f 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -188,6 +188,12 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
[TCA_TCINDEX_CLASSID] = { .type = NLA_U32 },
};
+static void tcindex_filter_result_init(struct tcindex_filter_result *r)
+{
+ memset(r, 0, sizeof(*r));
+ tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+}
+
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
u32 handle, struct tcindex_data *p,
@@ -207,15 +213,11 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
return err;
memcpy(&cp, p, sizeof(cp));
- memset(&new_filter_result, 0, sizeof(new_filter_result));
- tcf_exts_init(&new_filter_result.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ tcindex_filter_result_init(&new_filter_result);
+ tcindex_filter_result_init(&cr);
if (old_r)
- memcpy(&cr, r, sizeof(cr));
- else {
- memset(&cr, 0, sizeof(cr));
- tcf_exts_init(&cr.exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
- }
+ cr.res = r->res;
if (tb[TCA_TCINDEX_HASH])
cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -267,9 +269,14 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = -ENOMEM;
if (!cp.perfect && !cp.h) {
if (valid_perfect_hash(&cp)) {
+ int i;
+
cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
if (!cp.perfect)
goto errout;
+ for (i = 0; i < cp.hash; i++)
+ tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+ TCA_TCINDEX_POLICE);
balloc = 1;
} else {
cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
@@ -295,14 +302,17 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
tcf_bind_filter(tp, &cr.res, base);
}
- tcf_exts_change(tp, &cr.exts, &e);
+ if (old_r)
+ tcf_exts_change(tp, &r->exts, &e);
+ else
+ tcf_exts_change(tp, &cr.exts, &e);
tcf_tree_lock(tp);
if (old_r && old_r != r)
- memset(old_r, 0, sizeof(*old_r));
+ tcindex_filter_result_init(old_r);
memcpy(p, &cp, sizeof(cp));
- memcpy(r, &cr, sizeof(cr));
+ r->res = cr.res;
if (r == &new_filter_result) {
struct tcindex_filter **fp;
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index fd8fa9aa7c4e..5b3add31f9f1 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -25,7 +25,7 @@ cat << EOF
#define __IGNORE_rmdir /* unlinkat */
#define __IGNORE_lchown /* fchownat */
#define __IGNORE_access /* faccessat */
-#define __IGNORE_rename /* renameat */
+#define __IGNORE_rename /* renameat2 */
#define __IGNORE_readlink /* readlinkat */
#define __IGNORE_symlink /* symlinkat */
#define __IGNORE_utimes /* futimesat */
@@ -37,6 +37,9 @@ cat << EOF
#define __IGNORE_lstat64 /* fstatat64 */
#endif
+/* Missing flags argument */
+#define __IGNORE_renameat /* renameat2 */
+
/* CLOEXEC flag */
#define __IGNORE_pipe /* pipe2 */
#define __IGNORE_dup2 /* dup3 */
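The new __IGNORE_renameat entry reflects that renameat() lacks a flags argument and is superseded by renameat2(). The snippet below is a hedged userspace illustration only: it assumes a kernel providing the renameat2 syscall and headers that define SYS_renameat2, and supplies the RENAME_NOREPLACE value as a fallback in case the libc headers do not.

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)	/* fail instead of overwriting the target */
#endif

int main(void)
{
	/* renameat2() behaves like renameat() when flags == 0; with
	 * RENAME_NOREPLACE it fails with EEXIST if "new" already exists. */
	long ret = syscall(SYS_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
			   RENAME_NOREPLACE);
	if (ret < 0)
		perror("renameat2");
	return ret < 0 ? 1 : 0;
}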
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index 8fb1488a3cd4..97130f88838b 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -66,7 +66,6 @@ extern int apparmor_initialized __initdata;
char *aa_split_fqname(char *args, char **ns_name);
void aa_info_message(const char *str);
void *__aa_kvmalloc(size_t size, gfp_t flags);
-void kvfree(void *buffer);
static inline void *kvmalloc(size_t size)
{
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 69689922c491..c1827e068454 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -104,17 +104,3 @@ void *__aa_kvmalloc(size_t size, gfp_t flags)
}
return buffer;
}
-
-/**
- * kvfree - free an allocation do by kvmalloc
- * @buffer: buffer to free (MAYBE_NULL)
- *
- * Free a buffer allocated by kvmalloc
- */
-void kvfree(void *buffer)
-{
- if (is_vmalloc_addr(buffer))
- vfree(buffer);
- else
- kfree(buffer);
-}
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 8365909f5f8c..9134dbf70d3e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -306,57 +306,138 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
}
/**
- * may_access - verifies if a new exception is part of what is allowed
- * by a dev cgroup based on the default policy +
- * exceptions. This is used to make sure a child cgroup
- * won't have more privileges than its parent or to
- * verify if a certain access is allowed.
- * @dev_cgroup: dev cgroup to be tested against
- * @refex: new exception
- * @behavior: behavior of the exception
+ * match_exception - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of the provided parameters.
+ *
+ * Return: true in case it matches an exception completely
*/
-static bool may_access(struct dev_cgroup *dev_cgroup,
- struct dev_exception_item *refex,
- enum devcg_behavior behavior)
+static bool match_exception(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
{
struct dev_exception_item *ex;
- bool match = false;
- rcu_lockdep_assert(rcu_read_lock_held() ||
- lockdep_is_held(&devcgroup_mutex),
- "device_cgroup::may_access() called without proper synchronization");
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+ continue;
+ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+ continue;
+ if (ex->major != ~0 && ex->major != major)
+ continue;
+ if (ex->minor != ~0 && ex->minor != minor)
+ continue;
+ /* provided access cannot have more than the exception rule */
+ if (access & (~ex->access))
+ continue;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by the provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any of the exceptions in the list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
+{
+ struct dev_exception_item *ex;
- list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
- if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
continue;
- if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
continue;
- if (ex->major != ~0 && ex->major != refex->major)
+ /*
+ * We must be sure that both the exception and the provided
+ * range aren't masking all devices
+ */
+ if (ex->major != ~0 && major != ~0 && ex->major != major)
continue;
- if (ex->minor != ~0 && ex->minor != refex->minor)
+ if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
continue;
- if (refex->access & (~ex->access))
+ /*
+ * For the provided range to not match this exception at all,
+ * none of its access bits may overlap the exception's access
+ * bits
+ */
+ if (!(access & ex->access))
continue;
- match = true;
- break;
+ return true;
}
+ return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *refex,
+ enum devcg_behavior behavior)
+{
+ bool match = false;
+
+ rcu_lockdep_assert(rcu_read_lock_held() ||
+ lockdep_is_held(&devcgroup_mutex),
+ "device_cgroup:verify_new_ex called without proper synchronization");
if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
if (behavior == DEVCG_DEFAULT_ALLOW) {
- /* the exception will deny access to certain devices */
+ /*
+ * a new exception in the child doesn't matter, it only
+ * adds extra restrictions
+ */
return true;
} else {
- /* the exception will allow access to certain devices */
+ /*
+ * a new exception in the child will add more devices
+ * that can be accessed, so it can't match any of the
+ * parent's exceptions, even slightly
+ */
+ match = match_exception_partial(&dev_cgroup->exceptions,
+ refex->type,
+ refex->major,
+ refex->minor,
+ refex->access);
+
if (match)
- /*
- * a new exception allowing access shouldn't
- * match an parent's exception
- */
return false;
return true;
}
} else {
- /* only behavior == DEVCG_DEFAULT_DENY allowed here */
+ /*
+ * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+ * the new exception will add access to more devices and must
+ * be contained completely in a parent's exception to be
+ * allowed
+ */
+ match = match_exception(&dev_cgroup->exceptions, refex->type,
+ refex->major, refex->minor,
+ refex->access);
+
if (match)
/* parent has an exception that matches the proposed */
return true;
@@ -378,7 +459,38 @@ static int parent_has_perm(struct dev_cgroup *childcg,
if (!parent)
return 1;
- return may_access(parent, ex, childcg->behavior);
+ return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove the exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+ struct dev_exception_item *ex)
+{
+ struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
+
+ if (!parent)
+ return true;
+
+ /* It's always allowed to remove access to devices */
+ if (childcg->behavior == DEVCG_DEFAULT_DENY)
+ return true;
+
+ /*
+ * Make sure you're not removing part of or a whole exception existing in
+ * the parent cgroup
+ */
+ return !match_exception_partial(&parent->exceptions, ex->type,
+ ex->major, ex->minor, ex->access);
}
/**
@@ -616,17 +728,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
switch (filetype) {
case DEVCG_ALLOW:
- if (!parent_has_perm(devcgroup, &ex))
- return -EPERM;
/*
* If the default policy is to allow by default, try to remove
* an matching exception instead. And be silent about it: we
* don't want to break compatibility
*/
if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ /* Check if the parent allows removing it first */
+ if (!parent_allows_removal(devcgroup, &ex))
+ return -EPERM;
dev_exception_rm(devcgroup, &ex);
- return 0;
+ break;
}
+
+ if (!parent_has_perm(devcgroup, &ex))
+ return -EPERM;
rc = dev_exception_add(devcgroup, &ex);
break;
case DEVCG_DENY:
@@ -704,18 +820,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
short access)
{
struct dev_cgroup *dev_cgroup;
- struct dev_exception_item ex;
- int rc;
-
- memset(&ex, 0, sizeof(ex));
- ex.type = type;
- ex.major = major;
- ex.minor = minor;
- ex.access = access;
+ bool rc;
rcu_read_lock();
dev_cgroup = task_devcgroup(current);
- rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+ /* Can't match any of the exceptions, even partially */
+ rc = !match_exception_partial(&dev_cgroup->exceptions,
+ type, major, minor, access);
+ else
+ /* Need to match completely one exception to be allowed */
+ rc = match_exception(&dev_cgroup->exceptions, type, major,
+ minor, access);
rcu_read_unlock();
if (!rc)
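The two helpers above differ only in how strictly they compare wildcards and access bits. The following standalone sketch is illustrative only: the list is a plain array and the constants and function names are invented, but the wildcard and access-bit tests mirror the hunk. It shows a request that fails the complete match yet still triggers the partial one.

#include <stdbool.h>
#include <stdio.h>

#define T_BLOCK	1
#define T_CHAR	2
#define A_READ	1
#define A_WRITE	2
#define A_MKNOD	4
#define WILDCARD (~0u)

struct exception {
	short type;
	unsigned int major, minor;
	short access;
};

/* Complete match: a single exception must cover the whole request. */
static bool match_complete(const struct exception *ex, int n, short type,
			   unsigned int major, unsigned int minor, short access)
{
	for (int i = 0; i < n; i++) {
		if ((type & T_BLOCK) && !(ex[i].type & T_BLOCK))
			continue;
		if ((type & T_CHAR) && !(ex[i].type & T_CHAR))
			continue;
		if (ex[i].major != WILDCARD && ex[i].major != major)
			continue;
		if (ex[i].minor != WILDCARD && ex[i].minor != minor)
			continue;
		if (access & ~ex[i].access)	/* asking for more than granted */
			continue;
		return true;
	}
	return false;
}

/* Partial match: any overlap at all between the request and an exception. */
static bool match_partial(const struct exception *ex, int n, short type,
			  unsigned int major, unsigned int minor, short access)
{
	for (int i = 0; i < n; i++) {
		if ((type & T_BLOCK) && !(ex[i].type & T_BLOCK))
			continue;
		if ((type & T_CHAR) && !(ex[i].type & T_CHAR))
			continue;
		if (ex[i].major != WILDCARD && major != WILDCARD &&
		    ex[i].major != major)
			continue;
		if (ex[i].minor != WILDCARD && minor != WILDCARD &&
		    ex[i].minor != minor)
			continue;
		if (!(access & ex[i].access))	/* no access bits in common */
			continue;
		return true;
	}
	return false;
}

int main(void)
{
	const struct exception ex[] = {
		{ T_CHAR, 1, 3, A_READ | A_WRITE },	/* rw on char 1:3 */
	};

	/* read+mknod is not fully covered by the rule, but it does overlap */
	printf("complete: %d\n",
	       match_complete(ex, 1, T_CHAR, 1, 3, A_READ | A_MKNOD));	/* 0 */
	printf("partial:  %d\n",
	       match_partial(ex, 1, T_CHAR, 1, 3, A_READ | A_MKNOD));	/* 1 */
	return 0;
}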
diff --git a/sound/isa/sb/sb_mixer.c b/sound/isa/sb/sb_mixer.c
index 6496822c1808..1ff78ec9f0ac 100644
--- a/sound/isa/sb/sb_mixer.c
+++ b/sound/isa/sb/sb_mixer.c
@@ -818,12 +818,14 @@ int snd_sbmixer_new(struct snd_sb *chip)
return err;
break;
case SB_HW_DT019X:
- if ((err = snd_sbmixer_init(chip,
- snd_dt019x_controls,
- ARRAY_SIZE(snd_dt019x_controls),
- snd_dt019x_init_values,
- ARRAY_SIZE(snd_dt019x_init_values),
- "DT019X")) < 0)
+ err = snd_sbmixer_init(chip,
+ snd_dt019x_controls,
+ ARRAY_SIZE(snd_dt019x_controls),
+ snd_dt019x_init_values,
+ ARRAY_SIZE(snd_dt019x_init_values),
+ "DT019X");
+ if (err < 0)
+ return err;
break;
default:
strcpy(card->mixername, "???");
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index b540ad71eb0d..2c54629d62d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1367,6 +1367,12 @@ static int azx_first_init(struct azx *chip)
/* initialize streams */
azx_init_stream(chip);
+ /* workaround for Broadwell HDMI: the first stream is broken,
+ * so mask it by keeping it as if opened
+ */
+ if (pci->vendor == 0x8086 && pci->device == 0x160c)
+ chip->azx_dev[0].opened = 1;
+
/* initialize chip */
azx_init_pci(chip);
azx_init_chip(chip, (probe_only[dev] & 2) == 0);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 0cb5b89cd0c8..b4218a19df22 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1127,8 +1127,10 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
AMP_OUT_UNMUTE);
eld = &per_pin->sink_eld;
- if (!eld->monitor_present)
+ if (!eld->monitor_present) {
+ hdmi_set_channel_count(codec, per_pin->cvt_nid, channels);
return;
+ }
if (!non_pcm && per_pin->chmap_set)
ca = hdmi_manual_channel_allocation(channels, per_pin->chmap);
@@ -3330,6 +3332,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3385,6 +3388,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
MODULE_ALIAS("snd-hda-codec-id:10de0051");
MODULE_ALIAS("snd-hda-codec-id:10de0060");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0071");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
MODULE_ALIAS("snd-hda-codec-id:11069f81");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c1952c910339..49e884fb3e5d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4616,13 +4616,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x065c, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x067e, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0680, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0684, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index fa158cfe9b32..d1929de641e2 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -376,7 +376,7 @@ static int aic31xx_dapm_power_event(struct snd_soc_dapm_widget *w,
reg = AIC31XX_ADCFLAG;
break;
default:
- dev_err(w->codec->dev, "Unknown widget '%s' calling %s/n",
+ dev_err(w->codec->dev, "Unknown widget '%s' calling %s\n",
w->name, __func__);
return -EINVAL;
}
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 5522d2566c67..ecd26dd2e442 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -154,6 +154,7 @@ static struct reg_default wm8962_reg[] = {
{ 40, 0x0000 }, /* R40 - SPKOUTL volume */
{ 41, 0x0000 }, /* R41 - SPKOUTR volume */
+ { 49, 0x0010 }, /* R49 - Class D Control 1 */
{ 51, 0x0003 }, /* R51 - Class D Control 2 */
{ 56, 0x0506 }, /* R56 - Clocking 4 */
@@ -795,7 +796,6 @@ static bool wm8962_volatile_register(struct device *dev, unsigned int reg)
case WM8962_ALC2:
case WM8962_THERMAL_SHUTDOWN_STATUS:
case WM8962_ADDITIONAL_CONTROL_4:
- case WM8962_CLASS_D_CONTROL_1:
case WM8962_DC_SERVO_6:
case WM8962_INTERRUPT_STATUS_1:
case WM8962_INTERRUPT_STATUS_2:
@@ -2929,13 +2929,22 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
static int wm8962_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
- int val;
+ int val, ret;
if (mute)
- val = WM8962_DAC_MUTE;
+ val = WM8962_DAC_MUTE | WM8962_DAC_MUTE_ALT;
else
val = 0;
+ /*
+ * The DAC mute bit is mirrored in two registers, update both to keep
+ * the register cache consistent.
+ */
+ ret = snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_1,
+ WM8962_DAC_MUTE_ALT, val);
+ if (ret < 0)
+ return ret;
+
return snd_soc_update_bits(codec, WM8962_ADC_DAC_CONTROL_1,
WM8962_DAC_MUTE, val);
}
diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
index a1a5d5294c19..910aafd09d21 100644
--- a/sound/soc/codecs/wm8962.h
+++ b/sound/soc/codecs/wm8962.h
@@ -1954,6 +1954,10 @@
#define WM8962_SPKOUTL_ENA_MASK 0x0040 /* SPKOUTL_ENA */
#define WM8962_SPKOUTL_ENA_SHIFT 6 /* SPKOUTL_ENA */
#define WM8962_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
+#define WM8962_DAC_MUTE_ALT 0x0010 /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_MASK 0x0010 /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_SHIFT 4 /* DAC_MUTE */
+#define WM8962_DAC_MUTE_ALT_WIDTH 1 /* DAC_MUTE */
#define WM8962_SPKOUTL_PGA_MUTE 0x0002 /* SPKOUTL_PGA_MUTE */
#define WM8962_SPKOUTL_PGA_MUTE_MASK 0x0002 /* SPKOUTL_PGA_MUTE */
#define WM8962_SPKOUTL_PGA_MUTE_SHIFT 1 /* SPKOUTL_PGA_MUTE */
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index c8e5db1414d7..496ce2eb2f1f 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -258,10 +258,16 @@ static int fsl_esai_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
return -EINVAL;
}
- if (ratio == 1) {
+ /* Only EXTAL source can be output directly without using PSR and PM */
+ if (ratio == 1 && clksrc == esai_priv->extalclk) {
/* Bypass all the dividers if not being needed */
ecr |= tx ? ESAI_ECR_ETO : ESAI_ECR_ERO;
goto out;
+ } else if (ratio < 2) {
+ /* The ratio should be no less than 2 if using other sources */
+ dev_err(dai->dev, "failed to derive required HCK%c rate\n",
+ tx ? 'T' : 'R');
+ return -EINVAL;
}
ret = fsl_esai_divisor_cal(dai, tx, ratio, false, 0);
@@ -307,7 +313,8 @@ static int fsl_esai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
return -EINVAL;
}
- if (esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
+ /* The ratio should be contained by FP alone if bypassing PM and PSR */
+ if (!esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) {
dev_err(dai->dev, "the ratio is out of range (1 ~ 16)\n");
return -EINVAL;
}
@@ -454,12 +461,6 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
}
if (!dai->active) {
- /* Reset Port C */
- regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
- ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
- regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
- ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
-
/* Set synchronous mode */
regmap_update_bits(esai_priv->regmap, REG_ESAI_SAICR,
ESAI_SAICR_SYNC, esai_priv->synchronous ?
@@ -519,6 +520,11 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val);
+ /* Remove ESAI personal reset by configuring ESAI_PCRC and ESAI_PRRC */
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_PRRC,
+ ESAI_PRRC_PDC_MASK, ESAI_PRRC_PDC(ESAI_GPIO));
+ regmap_update_bits(esai_priv->regmap, REG_ESAI_PCRC,
+ ESAI_PCRC_PC_MASK, ESAI_PCRC_PC(ESAI_GPIO));
return 0;
}
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index ac869931d7f1..267717aa96c1 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -145,7 +145,7 @@ static const struct file_operations audmux_debugfs_fops = {
.llseek = default_llseek,
};
-static void __init audmux_debugfs_init(void)
+static void audmux_debugfs_init(void)
{
int i;
char buf[20];
diff --git a/sound/soc/intel/sst-acpi.c b/sound/soc/intel/sst-acpi.c
index 5d06eecb6198..18aee77f8d4a 100644
--- a/sound/soc/intel/sst-acpi.c
+++ b/sound/soc/intel/sst-acpi.c
@@ -138,6 +138,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
sst_pdata = &sst_acpi->sst_pdata;
sst_pdata->id = desc->sst_id;
+ sst_pdata->dma_dev = dev;
sst_acpi->desc = desc;
sst_acpi->mach = mach;
diff --git a/sound/soc/intel/sst-baytrail-dsp.c b/sound/soc/intel/sst-baytrail-dsp.c
index a50bf7fc0e3a..adf0aca5aca6 100644
--- a/sound/soc/intel/sst-baytrail-dsp.c
+++ b/sound/soc/intel/sst-baytrail-dsp.c
@@ -324,7 +324,7 @@ static int sst_byt_init(struct sst_dsp *sst, struct sst_pdata *pdata)
memcpy_toio(sst->addr.lpe + SST_BYT_MAILBOX_OFFSET,
&pdata->fw_base, sizeof(u32));
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(sst->dma_dev, DMA_BIT_MASK(32));
if (ret)
return ret;
diff --git a/sound/soc/intel/sst-baytrail-ipc.c b/sound/soc/intel/sst-baytrail-ipc.c
index d0eaeee21be4..0d31dbbf4806 100644
--- a/sound/soc/intel/sst-baytrail-ipc.c
+++ b/sound/soc/intel/sst-baytrail-ipc.c
@@ -542,16 +542,20 @@ struct sst_byt_stream *sst_byt_stream_new(struct sst_byt *byt, int id,
void *data)
{
struct sst_byt_stream *stream;
+ struct sst_dsp *sst = byt->dsp;
+ unsigned long flags;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL)
return NULL;
+ spin_lock_irqsave(&sst->spinlock, flags);
list_add(&stream->node, &byt->stream_list);
stream->notify_position = notify_position;
stream->pdata = data;
stream->byt = byt;
stream->str_id = id;
+ spin_unlock_irqrestore(&sst->spinlock, flags);
return stream;
}
@@ -630,6 +634,8 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
{
u64 header;
int ret = 0;
+ struct sst_dsp *sst = byt->dsp;
+ unsigned long flags;
if (!stream->commited)
goto out;
@@ -644,8 +650,10 @@ int sst_byt_stream_free(struct sst_byt *byt, struct sst_byt_stream *stream)
stream->commited = false;
out:
+ spin_lock_irqsave(&sst->spinlock, flags);
list_del(&stream->node);
kfree(stream);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
return ret;
}
diff --git a/sound/soc/intel/sst-dsp-priv.h b/sound/soc/intel/sst-dsp-priv.h
index 30ca14a6a835..401213455497 100644
--- a/sound/soc/intel/sst-dsp-priv.h
+++ b/sound/soc/intel/sst-dsp-priv.h
@@ -228,6 +228,7 @@ struct sst_dsp {
spinlock_t spinlock; /* IPC locking */
struct mutex mutex; /* DSP FW lock */
struct device *dev;
+ struct device *dma_dev;
void *thread_context;
int irq;
u32 id;
diff --git a/sound/soc/intel/sst-dsp.c b/sound/soc/intel/sst-dsp.c
index 0c129fd85ecf..0b715b20a2d7 100644
--- a/sound/soc/intel/sst-dsp.c
+++ b/sound/soc/intel/sst-dsp.c
@@ -337,6 +337,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
spin_lock_init(&sst->spinlock);
mutex_init(&sst->mutex);
sst->dev = dev;
+ sst->dma_dev = pdata->dma_dev;
sst->thread_context = sst_dev->thread_context;
sst->sst_dev = sst_dev;
sst->id = pdata->id;
diff --git a/sound/soc/intel/sst-dsp.h b/sound/soc/intel/sst-dsp.h
index 74052b59485c..e44423be66c4 100644
--- a/sound/soc/intel/sst-dsp.h
+++ b/sound/soc/intel/sst-dsp.h
@@ -169,6 +169,7 @@ struct sst_pdata {
u32 dma_base;
u32 dma_size;
int dma_engine;
+ struct device *dma_dev;
/* DSP */
u32 id;
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index f7687107cf7f..928f228c38e7 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -57,14 +57,8 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
sst_fw->private = private;
sst_fw->size = fw->size;
- err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
- if (err < 0) {
- kfree(sst_fw);
- return NULL;
- }
-
/* allocate DMA buffer to store FW data */
- sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
+ sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
if (!sst_fw->dma_buf) {
dev_err(dsp->dev, "error: DMA alloc failed\n");
@@ -106,7 +100,7 @@ void sst_fw_free(struct sst_fw *sst_fw)
list_del(&sst_fw->list);
mutex_unlock(&dsp->mutex);
- dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+ dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
sst_fw->dmable_fw_paddr);
kfree(sst_fw);
}
@@ -202,6 +196,9 @@ static int block_alloc_contiguous(struct sst_module *module,
size -= block->size;
}
+ list_for_each_entry(block, &tmp, list)
+ list_add(&block->module_list, &module->block_list);
+
list_splice(&tmp, &dsp->used_block_list);
return 0;
}
@@ -247,8 +244,7 @@ static int block_alloc(struct sst_module *module,
/* do we span > 1 blocks */
if (data->size > block->size) {
ret = block_alloc_contiguous(module, data,
- block->offset + block->size,
- data->size - block->size);
+ block->offset, data->size);
if (ret == 0)
return ret;
}
@@ -344,7 +340,7 @@ static int block_alloc_fixed(struct sst_module *module,
err = block_alloc_contiguous(module, data,
block->offset + block->size,
- data->size - block->size + data->offset - block->offset);
+ data->size - block->size);
if (err < 0)
return -ENOMEM;
@@ -371,15 +367,10 @@ static int block_alloc_fixed(struct sst_module *module,
if (data->offset >= block->offset && data->offset < block_end) {
err = block_alloc_contiguous(module, data,
- block->offset + block->size,
- data->size - block->size);
+ block->offset, data->size);
if (err < 0)
return -ENOMEM;
- /* add block */
- block->data_type = data->data_type;
- list_move(&block->list, &dsp->used_block_list);
- list_add(&block->module_list, &module->block_list);
return 0;
}
diff --git a/sound/soc/intel/sst-haswell-dsp.c b/sound/soc/intel/sst-haswell-dsp.c
index f5ebf36af889..535f517629fd 100644
--- a/sound/soc/intel/sst-haswell-dsp.c
+++ b/sound/soc/intel/sst-haswell-dsp.c
@@ -433,7 +433,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
int ret = -ENODEV, i, j, region_count;
u32 offset, size;
- dev = sst->dev;
+ dev = sst->dma_dev;
switch (sst->id) {
case SST_DEV_ID_LYNX_POINT:
@@ -466,7 +466,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
return ret;
}
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
if (ret)
return ret;
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 50e4246d4b57..e7996b39a484 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -1159,11 +1159,14 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
void *data)
{
struct sst_hsw_stream *stream;
+ struct sst_dsp *sst = hsw->dsp;
+ unsigned long flags;
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL)
return NULL;
+ spin_lock_irqsave(&sst->spinlock, flags);
list_add(&stream->node, &hsw->stream_list);
stream->notify_position = notify_position;
stream->pdata = data;
@@ -1172,6 +1175,7 @@ struct sst_hsw_stream *sst_hsw_stream_new(struct sst_hsw *hsw, int id,
/* work to process notification messages */
INIT_WORK(&stream->notify_work, hsw_notification_work);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
return stream;
}
@@ -1180,6 +1184,8 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
{
u32 header;
int ret = 0;
+ struct sst_dsp *sst = hsw->dsp;
+ unsigned long flags;
/* dont free DSP streams that are not commited */
if (!stream->commited)
@@ -1201,8 +1207,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
trace_hsw_stream_free_req(stream, &stream->free_req);
out:
+ cancel_work_sync(&stream->notify_work);
+ spin_lock_irqsave(&sst->spinlock, flags);
list_del(&stream->node);
kfree(stream);
+ spin_unlock_irqrestore(&sst->spinlock, flags);
return ret;
}
@@ -1538,10 +1547,28 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
}
/* Stream pointer positions */
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
struct sst_hsw_stream *stream)
{
- return stream->rpos.position;
+ u32 rpos;
+
+ sst_dsp_read(hsw->dsp, &rpos,
+ stream->reply.read_position_register_address, sizeof(rpos));
+
+ return rpos;
+}
+
+/* Stream presentation (monotonic) positions */
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
+ struct sst_hsw_stream *stream)
+{
+ u64 ppos;
+
+ sst_dsp_read(hsw->dsp, &ppos,
+ stream->reply.presentation_position_register_address,
+ sizeof(ppos));
+
+ return ppos;
}
int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
diff --git a/sound/soc/intel/sst-haswell-ipc.h b/sound/soc/intel/sst-haswell-ipc.h
index d517929ccc38..2ac194a6d04b 100644
--- a/sound/soc/intel/sst-haswell-ipc.h
+++ b/sound/soc/intel/sst-haswell-ipc.h
@@ -464,7 +464,9 @@ int sst_hsw_stream_get_write_pos(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 *position);
int sst_hsw_stream_set_write_position(struct sst_hsw *hsw,
struct sst_hsw_stream *stream, u32 stage_id, u32 position);
-int sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+u32 sst_hsw_get_dsp_position(struct sst_hsw *hsw,
+ struct sst_hsw_stream *stream);
+u64 sst_hsw_get_dsp_presentation_position(struct sst_hsw *hsw,
struct sst_hsw_stream *stream);
/* HW port config */
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index 0a32dd13a23d..9d5f64a583a3 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -99,6 +99,7 @@ struct hsw_pcm_data {
struct snd_compr_stream *cstream;
unsigned int wpos;
struct mutex mutex;
+ bool allocated;
};
/* private data for the driver */
@@ -107,12 +108,14 @@ struct hsw_priv_data {
struct sst_hsw *hsw;
/* page tables */
- unsigned char *pcm_pg[HSW_PCM_COUNT][2];
+ struct snd_dma_buffer dmab[HSW_PCM_COUNT][2];
/* DAI data */
struct hsw_pcm_data pcm[HSW_PCM_COUNT];
};
+static u32 hsw_notify_pointer(struct sst_hsw_stream *stream, void *data);
+
static inline u32 hsw_mixer_to_ipc(unsigned int value)
{
if (value >= ARRAY_SIZE(volume_map))
@@ -273,28 +276,26 @@ static const struct snd_kcontrol_new hsw_volume_controls[] = {
};
/* Create DMA buffer page table for DSP */
-static int create_adsp_page_table(struct hsw_priv_data *pdata,
- struct snd_soc_pcm_runtime *rtd,
- unsigned char *dma_area, size_t size, int pcm, int stream)
+static int create_adsp_page_table(struct snd_pcm_substream *substream,
+ struct hsw_priv_data *pdata, struct snd_soc_pcm_runtime *rtd,
+ unsigned char *dma_area, size_t size, int pcm)
{
- int i, pages;
+ struct snd_dma_buffer *dmab = snd_pcm_get_dma_buf(substream);
+ int i, pages, stream = substream->stream;
- if (size % PAGE_SIZE)
- pages = (size / PAGE_SIZE) + 1;
- else
- pages = size / PAGE_SIZE;
+ pages = snd_sgbuf_aligned_pages(size);
dev_dbg(rtd->dev, "generating page table for %p size 0x%zu pages %d\n",
dma_area, size, pages);
for (i = 0; i < pages; i++) {
u32 idx = (((i << 2) + i)) >> 1;
- u32 pfn = (virt_to_phys(dma_area + i * PAGE_SIZE)) >> PAGE_SHIFT;
+ u32 pfn = snd_sgbuf_get_addr(dmab, i * PAGE_SIZE) >> PAGE_SHIFT;
u32 *pg_table;
dev_dbg(rtd->dev, "pfn i %i idx %d pfn %x\n", i, idx, pfn);
- pg_table = (u32*)(pdata->pcm_pg[pcm][stream] + idx);
+ pg_table = (u32 *)(pdata->dmab[pcm][stream].area + idx);
if (i & 1)
*pg_table |= (pfn << 4);
@@ -317,12 +318,36 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
struct sst_hsw *hsw = pdata->hsw;
struct sst_module *module_data;
struct sst_dsp *dsp;
+ struct snd_dma_buffer *dmab;
enum sst_hsw_stream_type stream_type;
enum sst_hsw_stream_path_id path_id;
u32 rate, bits, map, pages, module_id;
u8 channels;
int ret;
+ /* check if we are being called a subsequent time */
+ if (pcm_data->allocated) {
+ ret = sst_hsw_stream_reset(hsw, pcm_data->stream);
+ if (ret < 0)
+ dev_dbg(rtd->dev, "error: reset stream failed %d\n",
+ ret);
+
+ ret = sst_hsw_stream_free(hsw, pcm_data->stream);
+ if (ret < 0) {
+ dev_dbg(rtd->dev, "error: free stream failed %d\n",
+ ret);
+ return ret;
+ }
+ pcm_data->allocated = false;
+
+ pcm_data->stream = sst_hsw_stream_new(hsw, rtd->cpu_dai->id,
+ hsw_notify_pointer, pcm_data);
+ if (pcm_data->stream == NULL) {
+ dev_err(rtd->dev, "error: failed to create stream\n");
+ return -EINVAL;
+ }
+ }
+
/* stream direction */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
path_id = SST_HSW_STREAM_PATH_SSP0_OUT;
@@ -416,8 +441,10 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
}
- ret = create_adsp_page_table(pdata, rtd, runtime->dma_area,
- runtime->dma_bytes, rtd->cpu_dai->id, substream->stream);
+ dmab = snd_pcm_get_dma_buf(substream);
+
+ ret = create_adsp_page_table(substream, pdata, rtd, runtime->dma_area,
+ runtime->dma_bytes, rtd->cpu_dai->id);
if (ret < 0)
return ret;
@@ -430,9 +457,9 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
pages = runtime->dma_bytes / PAGE_SIZE;
ret = sst_hsw_stream_buffer(hsw, pcm_data->stream,
- virt_to_phys(pdata->pcm_pg[rtd->cpu_dai->id][substream->stream]),
+ pdata->dmab[rtd->cpu_dai->id][substream->stream].addr,
pages, runtime->dma_bytes, 0,
- (u32)(virt_to_phys(runtime->dma_area) >> PAGE_SHIFT));
+ snd_sgbuf_get_addr(dmab, 0) >> PAGE_SHIFT);
if (ret < 0) {
dev_err(rtd->dev, "error: failed to set DMA buffer %d\n", ret);
return ret;
@@ -474,6 +501,7 @@ static int hsw_pcm_hw_params(struct snd_pcm_substream *substream,
dev_err(rtd->dev, "error: failed to commit stream %d\n", ret);
return ret;
}
+ pcm_data->allocated = true;
ret = sst_hsw_stream_pause(hsw, pcm_data->stream, 1);
if (ret < 0)
@@ -541,12 +569,14 @@ static snd_pcm_uframes_t hsw_pcm_pointer(struct snd_pcm_substream *substream)
struct hsw_pcm_data *pcm_data = snd_soc_pcm_get_drvdata(rtd);
struct sst_hsw *hsw = pdata->hsw;
snd_pcm_uframes_t offset;
+ uint64_t ppos;
+ u32 position = sst_hsw_get_dsp_position(hsw, pcm_data->stream);
- offset = bytes_to_frames(runtime,
- sst_hsw_get_dsp_position(hsw, pcm_data->stream));
+ offset = bytes_to_frames(runtime, position);
+ ppos = sst_hsw_get_dsp_presentation_position(hsw, pcm_data->stream);
- dev_dbg(rtd->dev, "PCM: DMA pointer %zu bytes\n",
- frames_to_bytes(runtime, (u32)offset));
+ dev_dbg(rtd->dev, "PCM: DMA pointer %du bytes, pos %llu\n",
+ position, ppos);
return offset;
}
@@ -606,6 +636,7 @@ static int hsw_pcm_close(struct snd_pcm_substream *substream)
dev_dbg(rtd->dev, "error: free stream failed %d\n", ret);
goto out;
}
+ pcm_data->allocated = false;
pcm_data->stream = NULL;
out:
@@ -621,7 +652,7 @@ static struct snd_pcm_ops hsw_pcm_ops = {
.hw_free = hsw_pcm_hw_free,
.trigger = hsw_pcm_trigger,
.pointer = hsw_pcm_pointer,
- .mmap = snd_pcm_lib_default_mmap,
+ .page = snd_pcm_sgbuf_ops_page,
};
static void hsw_pcm_free(struct snd_pcm *pcm)
@@ -632,17 +663,16 @@ static void hsw_pcm_free(struct snd_pcm *pcm)
static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm *pcm = rtd->pcm;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct sst_pdata *pdata = dev_get_platdata(platform->dev);
+ struct device *dev = pdata->dma_dev;
int ret = 0;
- ret = dma_coerce_mask_and_coherent(rtd->card->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV,
- rtd->card->dev,
+ SNDRV_DMA_TYPE_DEV_SG,
+ dev,
hsw_pcm_hardware.buffer_bytes_max,
hsw_pcm_hardware.buffer_bytes_max);
if (ret) {
@@ -742,11 +772,14 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
{
struct sst_pdata *pdata = dev_get_platdata(platform->dev);
struct hsw_priv_data *priv_data;
- int i;
+ struct device *dma_dev;
+ int i, ret = 0;
if (!pdata)
return -ENODEV;
+ dma_dev = pdata->dma_dev;
+
priv_data = devm_kzalloc(platform->dev, sizeof(*priv_data), GFP_KERNEL);
priv_data->hsw = pdata->dsp;
snd_soc_platform_set_drvdata(platform, priv_data);
@@ -758,15 +791,17 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
/* playback */
if (hsw_dais[i].playback.channels_min) {
- priv_data->pcm_pg[i][0] = kzalloc(PAGE_SIZE, GFP_DMA);
- if (priv_data->pcm_pg[i][0] == NULL)
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+ PAGE_SIZE, &priv_data->dmab[i][0]);
+ if (ret < 0)
goto err;
}
/* capture */
if (hsw_dais[i].capture.channels_min) {
- priv_data->pcm_pg[i][1] = kzalloc(PAGE_SIZE, GFP_DMA);
- if (priv_data->pcm_pg[i][1] == NULL)
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dma_dev,
+ PAGE_SIZE, &priv_data->dmab[i][1]);
+ if (ret < 0)
goto err;
}
}
@@ -776,11 +811,11 @@ static int hsw_pcm_probe(struct snd_soc_platform *platform)
err:
for (;i >= 0; i--) {
if (hsw_dais[i].playback.channels_min)
- kfree(priv_data->pcm_pg[i][0]);
+ snd_dma_free_pages(&priv_data->dmab[i][0]);
if (hsw_dais[i].capture.channels_min)
- kfree(priv_data->pcm_pg[i][1]);
+ snd_dma_free_pages(&priv_data->dmab[i][1]);
}
- return -ENOMEM;
+ return ret;
}
static int hsw_pcm_remove(struct snd_soc_platform *platform)
@@ -791,9 +826,9 @@ static int hsw_pcm_remove(struct snd_soc_platform *platform)
for (i = 0; i < ARRAY_SIZE(hsw_dais); i++) {
if (hsw_dais[i].playback.channels_min)
- kfree(priv_data->pcm_pg[i][0]);
+ snd_dma_free_pages(&priv_data->dmab[i][0]);
if (hsw_dais[i].capture.channels_min)
- kfree(priv_data->pcm_pg[i][1]);
+ snd_dma_free_pages(&priv_data->dmab[i][1]);
}
return 0;
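The create_adsp_page_table() change earlier in this file keeps the same packed layout as before: each page frame number occupies 20 bits, so entry i starts at byte (5*i)/2 and odd entries are shifted left by four bits. Below is a minimal userspace sketch of that packing, assuming little-endian byte order; the names here are made up and only the idx/shift arithmetic mirrors the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack a 20-bit page frame number into the table at entry i.  Entry i
 * starts at bit 20*i, i.e. byte offset (5*i)/2, and odd entries are
 * shifted left by four bits. */
static void pack_pfn(uint8_t *table, unsigned int i, uint32_t pfn)
{
	unsigned int idx = (5 * i) / 2;
	uint32_t word;

	memcpy(&word, table + idx, sizeof(word));
	if (i & 1)
		word |= (pfn & 0xfffff) << 4;
	else
		word |= (pfn & 0xfffff);
	memcpy(table + idx, &word, sizeof(word));
}

int main(void)
{
	uint8_t table[16] = { 0 };

	pack_pfn(table, 0, 0xabcde);
	pack_pfn(table, 1, 0x12345);

	for (int i = 0; i < 5; i++)
		printf("%02x ", table[i]);
	printf("\n");	/* de bc 5a 34 12: two 20-bit entries in 5 bytes */
	return 0;
}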
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 215b668166be..89424470a1f3 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -197,13 +197,12 @@ static void rsnd_dma_complete(void *data)
* rsnd_dai_pointer_update() will be called twice,
* ant it will breaks io->byte_pos
*/
-
- rsnd_dai_pointer_update(io, io->byte_per_period);
-
if (dma->submit_loop)
rsnd_dma_continue(dma);
rsnd_unlock(priv, flags);
+
+ rsnd_dai_pointer_update(io, io->byte_per_period);
}
static void __rsnd_dma_start(struct rsnd_dma *dma)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7769b0a2bc5a..6d6ceee447d5 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1612,8 +1612,11 @@ static void dapm_pre_sequence_async(void *data, async_cookie_t cookie)
"ASoC: Failed to turn on bias: %d\n", ret);
}
- /* Prepare for a STADDBY->ON or ON->STANDBY transition */
- if (d->bias_level != d->target_bias_level) {
+ /* Prepare for a transition to ON or away from ON */
+ if ((d->target_bias_level == SND_SOC_BIAS_ON &&
+ d->bias_level != SND_SOC_BIAS_ON) ||
+ (d->target_bias_level != SND_SOC_BIAS_ON &&
+ d->bias_level == SND_SOC_BIAS_ON)) {
ret = snd_soc_dapm_set_bias_level(d, SND_SOC_BIAS_PREPARE);
if (ret != 0)
dev_err(d->dev,
@@ -3475,8 +3478,11 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
cpu_dai = rtd->cpu_dai;
codec_dai = rtd->codec_dai;
- /* dynamic FE links have no fixed DAI mapping */
- if (rtd->dai_link->dynamic)
+ /*
+ * dynamic FE links have no fixed DAI mapping.
+ * CODEC<->CODEC links have no direct connection.
+ */
+ if (rtd->dai_link->dynamic || rtd->dai_link->params)
continue;
/* there is no point in connecting BE DAI links with dummies */
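The reworked condition in the bias-level hunk above only inserts the PREPARE step when the ON boundary is crossed in either direction, so STANDBY<->OFF moves no longer pass through it. A toy restatement with shortened enum names (not the ASoC API):

#include <stdbool.h>
#include <stdio.h>

enum bias { BIAS_OFF, BIAS_STANDBY, BIAS_PREPARE, BIAS_ON };

/* PREPARE is only needed when entering or leaving BIAS_ON. */
static bool needs_prepare(enum bias cur, enum bias target)
{
	return (target == BIAS_ON && cur != BIAS_ON) ||
	       (target != BIAS_ON && cur == BIAS_ON);
}

int main(void)
{
	printf("%d\n", needs_prepare(BIAS_STANDBY, BIAS_ON));	/* 1 */
	printf("%d\n", needs_prepare(BIAS_STANDBY, BIAS_OFF));	/* 0 */
	printf("%d\n", needs_prepare(BIAS_ON, BIAS_STANDBY));	/* 1 */
	return 0;
}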
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 2cedf09f6d96..a391de058037 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1675,7 +1675,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
- if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)
+ if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
continue;
if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 893d5a1afc3c..c3b5b7dca1c3 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -651,7 +651,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
int err = -ENODEV;
down_read(&chip->shutdown_rwsem);
- if (chip->probing)
+ if (chip->probing && chip->in_pm)
err = 0;
else if (!chip->shutdown)
err = usb_autopm_get_interface(chip->pm_intf);
@@ -663,7 +663,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
void snd_usb_autosuspend(struct snd_usb_audio *chip)
{
down_read(&chip->shutdown_rwsem);
- if (!chip->shutdown && !chip->probing)
+ if (!chip->shutdown && !chip->probing && !chip->in_pm)
usb_autopm_put_interface(chip->pm_intf);
up_read(&chip->shutdown_rwsem);
}
@@ -695,8 +695,9 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
chip->autosuspended = 1;
}
- list_for_each_entry(mixer, &chip->mixer_list, list)
- snd_usb_mixer_suspend(mixer);
+ if (chip->num_suspended_intf == 1)
+ list_for_each_entry(mixer, &chip->mixer_list, list)
+ snd_usb_mixer_suspend(mixer);
return 0;
}
@@ -711,6 +712,8 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
return 0;
if (--chip->num_suspended_intf)
return 0;
+
+ chip->in_pm = 1;
/*
* ALSA leaves material resumption to user space
* we just notify and restart the mixers
@@ -726,6 +729,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
chip->autosuspended = 0;
err_out:
+ chip->in_pm = 0;
return err;
}
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 9867ab866857..97acb906acc2 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -92,6 +92,7 @@ struct snd_usb_endpoint {
unsigned int curframesize; /* current packet size in frames (for capture) */
unsigned int syncmaxsize; /* sync endpoint packet size */
unsigned int fill_max:1; /* fill max packet size always */
+ unsigned int udh01_fb_quirk:1; /* corrupted feedback data */
unsigned int datainterval; /* log_2 of data packet interval */
unsigned int syncinterval; /* P for adaptive mode, 0 otherwise */
unsigned char silence_value;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index e70a87e0d9fe..289f582c9130 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -471,6 +471,10 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
ep->syncinterval = 3;
ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
+
+ if (chip->usb_id == USB_ID(0x0644, 0x8038) /* TEAC UD-H01 */ &&
+ ep->syncmaxsize == 4)
+ ep->udh01_fb_quirk = 1;
}
list_add_tail(&ep->list, &chip->ep_list);
@@ -1105,7 +1109,16 @@ void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
if (f == 0)
return;
- if (unlikely(ep->freqshift == INT_MIN)) {
+ if (unlikely(sender->udh01_fb_quirk)) {
+ /*
+ * The TEAC UD-H01 firmware sometimes changes the feedback value
+ * by +/- 0x1.0000.
+ */
+ if (f < ep->freqn - 0x8000)
+ f += 0x10000;
+ else if (f > ep->freqn + 0x8000)
+ f -= 0x10000;
+ } else if (unlikely(ep->freqshift == INT_MIN)) {
/*
* The first time we see a feedback value, determine its format
* by shifting it left or right until it matches the nominal
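The UD-H01 workaround above folds the reported feedback value back to within half a cycle (0x8000) of the nominal rate whenever the firmware's spurious +/-0x1.0000 jump shows up. A tiny standalone restatement follows; the function name and the example nominal value are illustrative, not the driver's.

#include <stdio.h>

/* Undo the firmware's occasional +/- 0x1.0000 jump by folding the
 * reported feedback value back to within 0x8000 of the nominal rate. */
static unsigned int fixup_feedback(unsigned int f, unsigned int freqn)
{
	if (f < freqn - 0x8000)
		f += 0x10000;
	else if (f > freqn + 0x8000)
		f -= 0x10000;
	return f;
}

int main(void)
{
	unsigned int freqn = 0x060000;	/* nominal rate in the same fixed-point format */

	printf("%#x\n", fixup_feedback(0x050123, freqn));	/* 0x60123 */
	printf("%#x\n", fixup_feedback(0x070123, freqn));	/* 0x60123 */
	printf("%#x\n", fixup_feedback(0x060123, freqn));	/* unchanged */
	return 0;
}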
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 131336d40492..c62a1659106d 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -1501,9 +1501,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
* The error should be lower than 2ms since the estimate relies
* on two reads of a counter updated every ms.
*/
- if (printk_ratelimit() &&
- abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
- dev_dbg(&subs->dev->dev,
+ if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+ dev_dbg_ratelimited(&subs->dev->dev,
"delay: estimated %d, actual %d\n",
est_delay, subs->last_delay);
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 25c4c7e217de..91d0380431b4 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -40,6 +40,7 @@ struct snd_usb_audio {
struct rw_semaphore shutdown_rwsem;
unsigned int shutdown:1;
unsigned int probing:1;
+ unsigned int in_pm:1;
unsigned int autosuspended:1;
unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
diff --git a/tools/Makefile b/tools/Makefile
index bcae806b0c39..9a617adc6675 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -44,6 +44,9 @@ cpupower: FORCE
cgroup firewire hv guest usb virtio vm net: FORCE
$(call descend,$@)
+liblockdep: FORCE
+ $(call descend,lib/lockdep)
+
libapikfs: FORCE
$(call descend,lib/api)
@@ -91,6 +94,9 @@ cpupower_clean:
cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
$(call descend,$(@:_clean=),clean)
+liblockdep_clean:
+ $(call descend,lib/lockdep,clean)
+
libapikfs_clean:
$(call descend,lib/api,clean)
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index cb09d3ff8f58..bba2f5253b6e 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -1,8 +1,7 @@
# file format version
FILE_VERSION = 1
-MAKEFLAGS += --no-print-directory
-LIBLOCKDEP_VERSION=$(shell make -sC ../../.. kernelversion)
+LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
# Makefiles suck: This macro sets a default value of $(2) for the
# variable named by $(1), unless the variable has been set by
@@ -231,7 +230,7 @@ install_lib: all_cmd
install: install_lib
clean:
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
+ $(RM) *.o *~ $(TARGETS) *.a *liblockdep*.so* $(VERSION_FILES) .*.d
$(RM) tags TAGS
endif # skip-makefile