-rw-r--r--Documentation/feature-removal-schedule.txt20
-rw-r--r--Documentation/hwmon/max1606462
-rw-r--r--Documentation/hwmon/max3444079
-rw-r--r--Documentation/hwmon/max868869
-rw-r--r--Documentation/hwmon/pmbus34
-rw-r--r--Documentation/hwmon/smm6658
-rw-r--r--Documentation/hwmon/submitting-patches109
-rw-r--r--Documentation/input/event-codes.txt262
-rw-r--r--Documentation/md.txt10
-rw-r--r--Documentation/sound/alsa/SB-Live-mixer.txt6
-rw-r--r--MAINTAINERS56
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/Makefile2
-rw-r--r--arch/alpha/kernel/core_mcpcia.c12
-rw-r--r--arch/alpha/kernel/err_titan.c4
-rw-r--r--arch/alpha/kernel/irq_alpha.c2
-rw-r--r--arch/alpha/kernel/setup.c6
-rw-r--r--arch/alpha/kernel/smc37c93x.c3
-rw-r--r--arch/alpha/kernel/sys_wildfire.c5
-rw-r--r--arch/alpha/kernel/time.c1
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Kconfig.debug11
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/include/asm/cputype.h1
-rw-r--r--arch/arm/include/asm/thread_notify.h1
-rw-r--r--arch/arm/include/asm/unistd.h4
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/calls.S4
-rw-r--r--arch/arm/kernel/elf.c17
-rw-r--r--arch/arm/kernel/hw_breakpoint.c7
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/traps.c3
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/mfp-pxa168.h9
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c5
-rw-r--r--arch/arm/mach-msm/timer.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/gpio.h17
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-pxa/pxa25x.c2
-rw-r--r--arch/arm/mach-pxa/pxa27x.c2
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c5
-rw-r--r--arch/arm/mach-tegra/gpio.c6
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c9
-rw-r--r--arch/arm/mach-ux500/board-mop500.c19
-rw-r--r--arch/arm/mm/mmap.c4
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/plat-s5p/pm.c11
-rw-r--r--arch/arm/plat-samsung/pm-check.c6
-rw-r--r--arch/arm/plat-samsung/pm.c5
-rw-r--r--arch/arm/vfp/vfpmodule.c34
-rw-r--r--arch/avr32/include/asm/setup.h9
-rw-r--r--arch/avr32/kernel/setup.c15
-rw-r--r--arch/avr32/kernel/traps.c22
-rw-r--r--arch/avr32/mach-at32ap/clock.c24
-rw-r--r--arch/avr32/mach-at32ap/extint.c22
-rw-r--r--arch/avr32/mach-at32ap/pio.c2
-rw-r--r--arch/avr32/mach-at32ap/pm-at32ap700x.S2
-rw-r--r--arch/blackfin/include/asm/system.h36
-rw-r--r--arch/blackfin/kernel/gptimers.c2
-rw-r--r--arch/blackfin/kernel/time-ts.c8
-rw-r--r--arch/blackfin/mach-common/smp.c19
-rw-r--r--arch/m68k/include/asm/unistd.h6
-rw-r--r--arch/m68k/kernel/entry_mm.S4
-rw-r--r--arch/m68k/kernel/syscalltable.S4
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/cputable.h16
-rw-r--r--arch/powerpc/include/asm/pte-common.h2
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/crash.c12
-rw-r--r--arch/powerpc/kernel/ibmebus.c6
-rw-r--r--arch/powerpc/kernel/legacy_serial.c8
-rw-r--r--arch/powerpc/kernel/perf_event.c37
-rw-r--r--arch/powerpc/kernel/time.c3
-rw-r--r--arch/powerpc/platforms/powermac/smp.c8
-rw-r--r--arch/powerpc/platforms/pseries/setup.c12
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c5
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c4
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/switch_cpu.S4
-rw-r--r--arch/s390/kernel/switch_cpu64.S4
-rw-r--r--arch/s390/oprofile/hwsampler.c6
-rw-r--r--arch/um/Kconfig.x864
-rw-r--r--arch/um/include/asm/bug.h6
-rw-r--r--arch/x86/include/asm/gart.h24
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/numa.h2
-rw-r--r--arch/x86/kernel/aperture_64.c2
-rw-r--r--arch/x86/kernel/apm_32.c5
-rw-r--r--arch/x86/kernel/cpu/amd.c19
-rw-r--r--arch/x86/kernel/cpu/perf_event.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c28
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c2
-rw-r--r--arch/x86/kernel/pci-gart_64.c9
-rw-r--r--arch/x86/mm/numa.c31
-rw-r--r--arch/x86/mm/numa_emulation.c20
-rw-r--r--arch/x86/platform/ce4100/falconfalls.dts2
-rw-r--r--arch/x86/platform/mrst/mrst.c10
-rw-r--r--arch/x86/xen/Kconfig1
-rw-r--r--arch/x86/xen/enlighten.c21
-rw-r--r--arch/x86/xen/mmu.c17
-rw-r--r--arch/x86/xen/setup.c2
-rw-r--r--arch/xtensa/kernel/irq.c18
-rw-r--r--block/blk-core.c178
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c4
-rw-r--r--block/blk-sysfs.c11
-rw-r--r--block/cfq-iosched.c26
-rw-r--r--block/elevator.c7
-rw-r--r--block/genhd.c8
-rw-r--r--drivers/amba/bus.c6
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ahci.h4
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libahci.c60
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/pata_at91.c22
-rw-r--r--drivers/base/platform.c7
-rw-r--r--drivers/base/power/main.c8
-rw-r--r--drivers/base/syscore.c2
-rw-r--r--drivers/char/agp/generic.c19
-rw-r--r--drivers/char/virtio_console.c11
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/gpio/ml_ioh_gpio.c1
-rw-r--r--drivers/gpio/pca953x.c5
-rw-r--r--drivers/gpio/pch_gpio.c1
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c73
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c53
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c78
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c25
-rw-r--r--drivers/gpu/drm/radeon/atom.c12
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c108
-rw-r--r--drivers/gpu/drm/radeon/r600.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c26
-rw-r--r--drivers/gpu/stub/Kconfig1
-rw-r--r--drivers/hwmon/pmbus_core.c1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c22
-rw-r--r--drivers/i2c/i2c-core.c6
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-cd_ioctl.c6
-rw-r--r--drivers/ide/ide-gd.c7
-rw-r--r--drivers/input/evdev.c33
-rw-r--r--drivers/input/input.c40
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c6
-rw-r--r--drivers/input/misc/xen-kbdfront.c13
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c17
-rw-r--r--drivers/leds/leds-regulator.c4
-rw-r--r--drivers/md/dm-raid.c8
-rw-r--r--drivers/md/md.c88
-rw-r--r--drivers/md/md.h26
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c27
-rw-r--r--drivers/md/raid5.c66
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/video/videobuf-dma-contig.c2
-rw-r--r--drivers/mfd/mfd-core.c16
-rw-r--r--drivers/misc/sgi-gru/grufile.c8
-rw-r--r--drivers/mtd/mtdswap.c2
-rw-r--r--drivers/mtd/nand/atmel_nand.c32
-rw-r--r--drivers/net/benet/be.h4
-rw-r--r--drivers/net/benet/be_main.c19
-rw-r--r--drivers/net/bna/bfa_ioc.c41
-rw-r--r--drivers/net/bna/bfa_ioc.h1
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c28
-rw-r--r--drivers/net/bna/bfi.h6
-rw-r--r--drivers/net/bna/bnad.c1
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c9
-rw-r--r--drivers/net/bonding/bond_alb.c6
-rw-r--r--drivers/net/bonding/bond_alb.h4
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/mlx4/en_rx.c4
-rw-r--r--drivers/net/mlx4/main.c5
-rw-r--r--drivers/net/mlx4/mlx4.h2
-rw-r--r--drivers/net/mlx4/sense.c4
-rw-r--r--drivers/net/natsemi.c3
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c17
-rw-r--r--drivers/net/pppoe.c2
-rw-r--r--drivers/net/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c14
-rw-r--r--drivers/net/sfc/efx.c6
-rw-r--r--drivers/net/sfc/io.h2
-rw-r--r--drivers/net/sfc/net_driver.h2
-rw-r--r--drivers/net/sfc/nic.c22
-rw-r--r--drivers/net/sfc/nic.h1
-rw-r--r--drivers/net/sfc/selftest.c25
-rw-r--r--drivers/net/sfc/tx.c3
-rw-r--r--drivers/net/sis900.c23
-rw-r--r--drivers/net/smsc911x.c8
-rw-r--r--drivers/net/stmmac/dwmac_lib.c28
-rw-r--r--drivers/net/stmmac/stmmac_main.c49
-rw-r--r--drivers/net/tokenring/3c359.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/usb/smsc95xx.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c6
-rw-r--r--drivers/net/wireless/ath/regd_common.h1
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/dma.h2
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c17
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h2
-rw-r--r--drivers/net/wireless/mwl8k.c9
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c6
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c2
-rw-r--r--drivers/net/wireless/wl12xx/spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c20
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h1
-rw-r--r--drivers/parport/parport_pc.c8
-rw-r--r--drivers/pci/Kconfig4
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/intel-iommu.c55
-rw-r--r--drivers/pci/pci-driver.c6
-rw-r--r--drivers/pci/setup-bus.c4
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c5
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c15
-rw-r--r--drivers/platform/x86/Kconfig3
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c43
-rw-r--r--drivers/platform/x86/samsung-laptop.c17
-rw-r--r--drivers/platform/x86/sony-laptop.c65
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c3
-rw-r--r--drivers/rapidio/rio.c5
-rw-r--r--drivers/rapidio/switches/idt_gen2.c1
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/interface.c26
-rw-r--r--drivers/rtc/rtc-bfin.c2
-rw-r--r--drivers/rtc/rtc-coh901331.c4
-rw-r--r--drivers/rtc/rtc-mc13xxx.c1
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/cio/device.c24
-rw-r--r--drivers/s390/cio/qdio_main.c16
-rw-r--r--drivers/scsi/scsi_lib.c17
-rw-r--r--drivers/scsi/scsi_transport_fc.c19
-rw-r--r--drivers/spi/amba-pl022.c2
-rw-r--r--drivers/spi/dw_spi.c2
-rw-r--r--drivers/spi/pxa2xx_spi.c2
-rw-r--r--drivers/spi/spi_bfin5xx.c2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/samsung-laptop/Kconfig10
-rw-r--r--drivers/staging/samsung-laptop/Makefile1
-rw-r--r--drivers/staging/samsung-laptop/TODO5
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c843
-rw-r--r--drivers/tty/n_gsm.c8
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/core/devices.c10
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c12
-rw-r--r--drivers/usb/gadget/f_audio.c1
-rw-r--r--drivers/usb/gadget/f_eem.c8
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c20
-rw-r--r--drivers/usb/gadget/inode.c4
-rw-r--r--drivers/usb/gadget/pch_udc.c8
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/host/ehci-q.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/ohci-au1xxx.c2
-rw-r--r--drivers/usb/host/pci-quirks.c117
-rw-r--r--drivers/usb/host/xhci-mem.c106
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c219
-rw-r--r--drivers/usb/host/xhci.c23
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/musb/Kconfig6
-rw-r--r--drivers/usb/musb/blackfin.c24
-rw-r--r--drivers/usb/musb/cppi_dma.c27
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h5
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/musbhsdma.c8
-rw-r--r--drivers/usb/musb/omap2430.c3
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/qcserial.c31
-rw-r--r--drivers/video/pxafb.c4
-rw-r--r--drivers/virtio/virtio_pci.c15
-rw-r--r--drivers/virtio/virtio_ring.c1
-rw-r--r--drivers/xen/events.c6
-rw-r--r--drivers/xen/manage.c15
-rw-r--r--fs/9p/fid.c15
-rw-r--r--fs/9p/v9fs.h1
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_inode_dotl.c2
-rw-r--r--fs/9p/vfs_super.c80
-rw-r--r--fs/binfmt_elf.c6
-rw-r--r--fs/btrfs/acl.c9
-rw-r--r--fs/btrfs/ctree.h9
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c125
-rw-r--r--fs/btrfs/extent_io.c82
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/file.c21
-rw-r--r--fs/btrfs/free-space-cache.c119
-rw-r--r--fs/btrfs/inode.c165
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/super.c42
-rw-r--r--fs/btrfs/transaction.c48
-rw-r--r--fs/btrfs/transaction.h4
-rw-r--r--fs/btrfs/xattr.c33
-rw-r--r--fs/cifs/README16
-rw-r--r--fs/cifs/cache.c2
-rw-r--r--fs/cifs/cifs_debug.c43
-rw-r--r--fs/cifs/cifs_spnego.c4
-rw-r--r--fs/cifs/cifs_unicode.c35
-rw-r--r--fs/cifs/cifs_unicode.h2
-rw-r--r--fs/cifs/cifsencrypt.c21
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsglob.h13
-rw-r--r--fs/cifs/cifssmb.c14
-rw-r--r--fs/cifs/connect.c68
-rw-r--r--fs/cifs/file.c70
-rw-r--r--fs/cifs/link.c4
-rw-r--r--fs/cifs/misc.c3
-rw-r--r--fs/cifs/sess.c23
-rw-r--r--fs/dcache.c89
-rw-r--r--fs/ecryptfs/crypto.c21
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h7
-rw-r--r--fs/ecryptfs/file.c25
-rw-r--r--fs/ecryptfs/inode.c60
-rw-r--r--fs/ecryptfs/kthread.c6
-rw-r--r--fs/ecryptfs/main.c72
-rw-r--r--fs/ecryptfs/super.c16
-rw-r--r--fs/ext3/inode.c2
-rw-r--r--fs/ext4/ext4_jbd2.h4
-rw-r--r--fs/ext4/fsync.c17
-rw-r--r--fs/ext4/inode.c35
-rw-r--r--fs/ext4/super.c74
-rw-r--r--fs/fhandle.c1
-rw-r--r--fs/filesystems.c3
-rw-r--r--fs/gfs2/aops.c2
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/file.c58
-rw-r--r--fs/gfs2/glock.c6
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/inode.c56
-rw-r--r--fs/gfs2/inode.h3
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/gfs2/super.c14
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c3
-rw-r--r--fs/namei.c1
-rw-r--r--fs/namespace.c16
-rw-r--r--fs/nfs/namespace.c58
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/write.c6
-rw-r--r--fs/nfsd/lockd.c1
-rw-r--r--fs/nfsd/nfs4state.c8
-rw-r--r--fs/nfsd/vfs.c9
-rw-r--r--fs/partitions/ldm.c16
-rw-r--r--fs/proc/base.c9
-rw-r--r--fs/quota/dquot.c13
-rw-r--r--fs/ramfs/file-nommu.c1
-rw-r--r--fs/ubifs/debug.h152
-rw-r--r--fs/ubifs/file.c3
-rw-r--r--fs/ubifs/recovery.c26
-rw-r--r--fs/ubifs/super.c29
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c24
-rw-r--r--fs/xfs/linux-2.6/xfs_message.c31
-rw-r--r--fs/xfs/linux-2.6/xfs_message.h24
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c129
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c228
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.h2
-rw-r--r--fs/xfs/quota/xfs_qm.c7
-rw-r--r--fs/xfs/quota/xfs_qm.h5
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c2
-rw-r--r--fs/xfs/xfs_alloc.c30
-rw-r--r--fs/xfs/xfs_inode_item.c67
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--fs/xfs/xfs_log.c38
-rw-r--r--fs/xfs/xfs_log_priv.h1
-rw-r--r--fs/xfs/xfs_mount.h9
-rw-r--r--fs/xfs/xfs_trans_ail.c421
-rw-r--r--fs/xfs/xfs_trans_priv.h22
-rw-r--r--include/linux/bit_spinlock.h8
-rw-r--r--include/linux/blkdev.h54
-rw-r--r--include/linux/can/platform/mcp251x.h2
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/input.h10
-rw-r--r--include/linux/input/mt.h6
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/list_bl.h11
-rw-r--r--include/linux/memcontrol.h2
-rw-r--r--include/linux/mfd/core.h13
-rw-r--r--include/linux/netfilter.h3
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h3
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/platform_device.h5
-rw-r--r--include/linux/posix-clock.h5
-rw-r--r--include/linux/rio.h2
-rw-r--r--include/linux/rio_ids.h1
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/suspend.h11
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/vmstat.h7
-rw-r--r--include/net/9p/9p.h2
-rw-r--r--include/net/9p/client.h5
-rw-r--r--include/net/ip_vs.h2
-rw-r--r--include/net/mac80211.h15
-rw-r--r--include/net/route.h5
-rw-r--r--include/trace/events/block.h30
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/kexec.c7
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/power/Kconfig6
-rw-r--r--kernel/power/hibernate.c10
-rw-r--r--kernel/power/suspend.c5
-rw-r--r--kernel/sched.c20
-rw-r--r--kernel/sched_fair.c14
-rw-r--r--kernel/signal.c4
-rw-r--r--kernel/time/posix-clock.c24
-rw-r--r--kernel/trace/blktrace.c33
-rw-r--r--lib/kstrtox.c9
-rw-r--r--lib/test-kstrtox.c32
-rw-r--r--mm/huge_memory.c49
-rw-r--r--mm/memory.c28
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/mlock.c13
-rw-r--r--mm/mmap.c15
-rw-r--r--mm/oom_kill.c28
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/shmem.c6
-rw-r--r--mm/vmscan.c24
-rw-r--r--mm/vmstat.c18
-rw-r--r--net/9p/client.c29
-rw-r--r--net/9p/protocol.c7
-rw-r--r--net/9p/trans_common.c2
-rw-r--r--net/9p/trans_virtio.c15
-rw-r--r--net/bridge/br_netfilter.c6
-rw-r--r--net/caif/cfdgml.c6
-rw-r--r--net/caif/cfmuxl.c4
-rw-r--r--net/ceph/osd_client.c12
-rw-r--r--net/core/dev.c10
-rw-r--r--net/dsa/mv88e6131.c23
-rw-r--r--net/dsa/mv88e6xxx.h2
-rw-r--r--net/ieee802154/Makefile2
-rw-r--r--net/ipv4/inet_connection_sock.c5
-rw-r--r--net/ipv4/inetpeer.c13
-rw-r--r--net/ipv4/ip_options.c6
-rw-r--r--net/ipv4/netfilter.c5
-rw-r--r--net/ipv4/route.c8
-rw-r--r--net/ipv4/sysctl_net_ipv4.c3
-rw-r--r--net/ipv4/xfrm4_policy.c1
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/netfilter.c13
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/irda/af_irda.c3
-rw-r--r--net/llc/llc_input.c3
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c7
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c3
-rw-r--r--net/netfilter/ipset/ip_set_core.c127
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c53
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c16
-rw-r--r--net/netfilter/xt_TCPMSS.c2
-rw-r--r--net/netfilter/xt_addrtype.c42
-rw-r--r--net/netfilter/xt_conntrack.c2
-rw-r--r--net/netfilter/xt_set.c18
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--scripts/kconfig/conf.c2
-rw-r--r--security/capability.c2
-rw-r--r--security/security.c6
-rw-r--r--security/selinux/avc.c36
-rw-r--r--security/selinux/hooks.c24
-rw-r--r--security/selinux/include/avc.h18
-rw-r--r--security/smack/smack_lsm.c6
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c3
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c70
-rw-r--r--sound/pci/hda/patch_realtek.c27
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/soc/codecs/jz4740.c2
-rw-r--r--sound/soc/codecs/sn95031.c2
-rw-r--r--sound/soc/codecs/wm8903.c38
-rw-r--r--sound/soc/codecs/wm8994.c16
-rw-r--r--sound/soc/codecs/wm_hubs.c8
-rw-r--r--sound/soc/mid-x86/sst_platform.c10
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c1
-rw-r--r--sound/soc/pxa/zylonite.c6
-rw-r--r--sound/soc/samsung/pcm.c4
-rw-r--r--sound/soc/sh/fsi.c22
-rw-r--r--sound/soc/soc-core.c13
-rw-r--r--sound/soc/tegra/harmony.c1
-rw-r--r--sound/usb/midi.c1
560 files changed, 5737 insertions, 4232 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 274b32d12532..492e81df2968 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -387,26 +387,6 @@ Who: Tejun Heo <tj@kernel.org>
----------------------------
-What: Support for lcd_switch and display_get in asus-laptop driver
-When: March 2010
-Why: These two features use non-standard interfaces. There are the
- only features that really need multiple path to guess what's
- the right method name on a specific laptop.
-
- Removing them will allow to remove a lot of code an significantly
- clean the drivers.
-
- This will affect the backlight code which won't be able to know
- if the backlight is on or off. The platform display file will also be
- write only (like the one in eeepc-laptop).
-
- This should'nt affect a lot of user because they usually know
- when their display is on or off.
-
-Who: Corentin Chary <corentin.chary@gmail.com>
-
-----------------------------
-
What: sysfs-class-rfkill state file
When: Feb 2014
Files: net/rfkill/core.c
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
new file mode 100644
index 000000000000..41728999e142
--- /dev/null
+++ b/Documentation/hwmon/max16064
@@ -0,0 +1,62 @@
+Kernel driver max16064
+======================
+
+Supported chips:
+ * Maxim MAX16064
+ Prefix: 'max16064'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
+Controller with Active-Voltage Output Control and PMBus Interface.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-4]_label "vout[1-4]"
+in[1-4]_input Measured voltage. From READ_VOUT register.
+in[1-4]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-4]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+ status is set.
+temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+ status is set.
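Since the driver neither auto-detects nor scans addresses, the device has to be
instantiated by hand before the sysfs attributes above appear. A minimal
user-space sketch follows (not part of the patch), assuming bus i2c-0, slave
address 0x40 and hwmon0; all three are placeholders for the real system values.

#include <stdio.h>

int main(void)
{
	FILE *f;

	/* Explicit instantiation, see Documentation/i2c/instantiating-devices */
	f = fopen("/sys/bus/i2c/devices/i2c-0/new_device", "w");
	if (!f)
		return 1;
	fprintf(f, "max16064 0x40\n");
	fclose(f);

	/* Read the measured output voltage of channel 1 (reported in mV) */
	f = fopen("/sys/class/hwmon/hwmon0/device/in1_input", "r");
	if (f) {
		long mv;

		if (fscanf(f, "%ld", &mv) == 1)
			printf("vout1 = %ld mV\n", mv);
		fclose(f);
	}
	return 0;
}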
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
new file mode 100644
index 000000000000..6c525dd07d59
--- /dev/null
+++ b/Documentation/hwmon/max34440
@@ -0,0 +1,79 @@
+Kernel driver max34440
+======================
+
+Supported chips:
+ * Maxim MAX34440
+ Prefixes: 'max34440'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+ * Maxim MAX34441
+ PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+ Prefixes: 'max34441'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
+Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager
+and Intelligent Fan Controller.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in[1-6]_label "vout[1-6]".
+in[1-6]_input Measured voltage. From READ_VOUT register.
+in[1-6]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in[1-6]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr[1-6]_label "iout[1-6]".
+curr[1-6]_input Measured current. From READ_IOUT register.
+curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
+curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
+
+ in6 and curr6 attributes only exist for MAX34440.
+
+temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register.
+ temp1 is the chip's internal temperature. temp2..temp5
+ are remote I2C temperature sensors. For MAX34441, temp6
+ is a remote thermal-diode sensor. For MAX34440, temp6..8
+ are remote I2C temperature sensors.
+temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register.
+temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp[1-8]_max_alarm Temperature high alarm.
+temp[1-8]_crit_alarm Temperature critical high alarm.
+
+ temp7 and temp8 attributes only exist for MAX34440.
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
new file mode 100644
index 000000000000..0ddd3a412030
--- /dev/null
+++ b/Documentation/hwmon/max8688
@@ -0,0 +1,69 @@
+Kernel driver max8688
+=====================
+
+Supported chips:
+ * Maxim MAX8688
+ Prefix: 'max8688'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
+Controller/Monitor with PMBus Interface.
+
+The driver is a client driver to the core PMBus driver. Please see
+Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+in1_label "vout1"
+in1_input Measured voltage. From READ_VOUT register.
+in1_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
+in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
+in1_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
+in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
+in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
+in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
+
+curr1_label "iout1"
+curr1_input Measured current. From READ_IOUT register.
+curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
+curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
+curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register.
+curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
+
+temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
+temp1_max Maximum temperature. From OT_WARN_LIMIT register.
+temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
+temp1_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
+ status is set.
+temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
+ status is set.
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index dc4933e96344..5e462fc7f99b 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -13,26 +13,6 @@ Supported chips:
Prefix: 'ltc2978'
Addresses scanned: -
Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
- * Maxim MAX16064
- Quad Power-Supply Controller
- Prefix: 'max16064'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
- * Maxim MAX34440
- PMBus 6-Channel Power-Supply Manager
- Prefixes: 'max34440'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
- * Maxim MAX34441
- PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
- Prefixes: 'max34441'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
- * Maxim MAX8688
- Digital Power-Supply Controller/Monitor
- Prefix: 'max8688'
- Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
* Generic PMBus devices
Prefix: 'pmbus'
Addresses scanned: -
@@ -175,11 +155,13 @@ currX_crit Critical maximum current.
From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
currX_alarm Current high alarm.
From IIN_OC_WARNING or IOUT_OC_WARNING status.
+currX_max_alarm Current high alarm.
+ From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT status.
currX_lcrit_alarm Output current critical low alarm.
From IOUT_UC_FAULT status.
currX_crit_alarm Current critical high alarm.
From IIN_OC_FAULT or IOUT_OC_FAULT status.
-currX_label "iin" or "vinY"
+currX_label "iin" or "ioutY"
powerX_input Measured power. From READ_PIN or READ_POUT register.
powerX_cap Output power cap. From POUT_MAX register.
@@ -193,13 +175,13 @@ powerX_crit_alarm Output power critical high alarm.
From POUT_OP_FAULT status.
powerX_label "pin" or "poutY"
-tempX_input Measured tempererature.
+tempX_input Measured temperature.
From READ_TEMPERATURE_X register.
-tempX_min Mimimum tempererature. From UT_WARN_LIMIT register.
-tempX_max Maximum tempererature. From OT_WARN_LIMIT register.
-tempX_lcrit Critical low tempererature.
+tempX_min Mimimum temperature. From UT_WARN_LIMIT register.
+tempX_max Maximum temperature. From OT_WARN_LIMIT register.
+tempX_lcrit Critical low temperature.
From UT_FAULT_LIMIT register.
-tempX_crit Critical high tempererature.
+tempX_crit Critical high temperature.
From OT_FAULT_LIMIT register.
tempX_min_alarm Chip temperature low alarm. Set by comparing
READ_TEMPERATURE_X with UT_WARN_LIMIT if
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665
index 3820fc9ca52d..59e316140542 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665
@@ -150,8 +150,8 @@ in8_crit_alarm Channel F critical alarm
in9_crit_alarm AIN1 critical alarm
in10_crit_alarm AIN2 critical alarm
-temp1_input Chip tempererature
-temp1_min Mimimum chip tempererature
-temp1_max Maximum chip tempererature
-temp1_crit Critical chip tempererature
+temp1_input Chip temperature
+temp1_min Mimimum chip temperature
+temp1_max Maximum chip temperature
+temp1_crit Critical chip temperature
temp1_crit_alarm Temperature critical alarm
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches
new file mode 100644
index 000000000000..86f42e8e9e49
--- /dev/null
+++ b/Documentation/hwmon/submitting-patches
@@ -0,0 +1,109 @@
+ How to Get Your Patch Accepted Into the Hwmon Subsystem
+ -------------------------------------------------------
+
+This text is a collection of suggestions for people writing patches or
+drivers for the hwmon subsystem. Following these suggestions will greatly
+increase the chances of your change being accepted.
+
+
+1. General
+----------
+
+* It should be unnecessary to mention, but please read and follow
+ Documentation/SubmitChecklist
+ Documentation/SubmittingDrivers
+ Documentation/SubmittingPatches
+ Documentation/CodingStyle
+
+* If your patch generates checkpatch warnings, please refrain from explanations
+ such as "I don't like that coding style". Keep in mind that each unnecessary
+ warning helps hide a real problem. If you don't like the kernel coding
+ style, don't write kernel drivers.
+
+* Please test your patch thoroughly. We are not your test group.
+ Sometimes a patch cannot be tested, or cannot be tested completely, because
+ of missing hardware. In such cases, you should test-build the code on at
+ least one architecture. If run-time testing was not possible, state so
+ explicitly below the patch header.
+
+* If your patch (or the driver) is affected by configuration options such as
+ CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration
+ variants.
+
+
+2. Adding functionality to existing drivers
+-------------------------------------------
+
+* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
+ date.
+
+* Make sure the information in Kconfig is up to date.
+
+* If the added functionality requires some cleanup or structural changes, split
+ your patch into a cleanup part and the actual addition. This makes it easier
+ to review your changes, and to bisect any resulting problems.
+
+* Never mix bug fixes, cleanup, and functional enhancements in a single patch.
+
+
+3. New drivers
+--------------
+
+* Running your patch or driver file(s) through checkpatch does not mean its
+ formatting is clean. If unsure about formatting in your new driver, run it
+ through Lindent. Lindent is not perfect, and you may have to do some minor
+ cleanup, but it is a good start.
+
+* Consider adding yourself to MAINTAINERS.
+
+* Document the driver in Documentation/hwmon/<driver_name>.
+
+* Add the driver to Kconfig and Makefile in alphabetical order.
+
+* Make sure that all dependencies are listed in Kconfig. For new drivers, it
+ is most likely prudent to add a dependency on EXPERIMENTAL.
+
+* Avoid forward declarations if you can. Rearrange the code if necessary.
+
+* Avoid calculations in macros and macro-generated functions. While such macros
+ may save a line or so in the source, it obfuscates the code and makes code
+ review more difficult. It may also result in code which is more complicated
+ than necessary. Use inline functions or just regular functions instead.
+
+* If the driver has a detect function, make sure it is silent. Debug messages
+ and messages printed after a successful detection are acceptable, but it
+ must not print messages such as "Chip XXX not found/supported".
+
+ Keep in mind that the detect function will run for all drivers supporting an
+ address if a chip is detected on that address. Unnecessary messages will just
+ pollute the kernel log and not provide any value.
+
+* Provide a detect function if and only if a chip can be detected reliably.
+
+* Avoid writing to chip registers in the detect function. If you have to write,
+ only do it after you have already gathered enough data to be certain that the
+ detection is going to be successful.
+
+ Keep in mind that the chip might not be what your driver believes it is, and
+ writing to it might cause a bad misconfiguration.
+
+* Make sure there are no race conditions in the probe function. Specifically,
+ completely initialize your chip first, then create sysfs entries and register
+ with the hwmon subsystem.
+
+* Do not provide support for deprecated sysfs attributes.
+
+* Do not create non-standard attributes unless really needed. If you have to use
+ non-standard attributes, or you believe you do, discuss it on the mailing list
+ first. In either case, provide a detailed explanation of why you need the
+ non-standard attribute(s).
+ Standard attributes are specified in Documentation/hwmon/sysfs-interface.
+
+* When deciding which sysfs attributes to support, look at the chip's
+ capabilities. While we do not expect your driver to support everything the
+ chip may offer, it should at least support all limits and alarms.
+
+* Last but not least, please check if a driver for your chip already exists
+ before starting to write a new driver. Especially for temperature sensors,
+ new chips are often variants of previously released chips. In some cases,
+ a presumably new chip may simply have been relabeled.
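The probe-ordering rule above (initialize the chip fully, then create sysfs
attributes, then register with hwmon) is easiest to see in code. Below is a
minimal sketch of such a probe function for a hypothetical I2C chip; all
example_* identifiers are placeholders, not an existing driver.

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

struct example_data {
	struct device *hwmon_dev;
};

/* Real attributes would go into this (hypothetical) group. */
static struct attribute *example_attrs[] = { NULL };
static const struct attribute_group example_group = { .attrs = example_attrs };

static int example_init_chip(struct i2c_client *client)
{
	/* Program limits, configuration registers, etc. (hypothetical). */
	return 0;
}

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_data *data;
	int err;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	i2c_set_clientdata(client, data);

	err = example_init_chip(client);	/* 1. chip fully initialized */
	if (err)
		goto exit_free;

	err = sysfs_create_group(&client->dev.kobj, &example_group);
	if (err)				/* 2. sysfs attributes */
		goto exit_free;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {		/* 3. hwmon registration last */
		err = PTR_ERR(data->hwmon_dev);
		goto exit_remove;
	}
	return 0;

exit_remove:
	sysfs_remove_group(&client->dev.kobj, &example_group);
exit_free:
	kfree(data);
	return err;
}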
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
new file mode 100644
index 000000000000..23fcb05175be
--- /dev/null
+++ b/Documentation/input/event-codes.txt
@@ -0,0 +1,262 @@
+The input protocol uses a map of types and codes to express input device values
+to userspace. This document describes the types and codes and how and when they
+may be used.
+
+A single hardware event generates multiple input events. Each input event
+contains the new value of a single data item. A special event type, EV_SYN, is
+used to separate input events into packets of input data changes occurring at
+the same moment in time. In the following, the term "event" refers to a single
+input event encompassing a type, code, and value.
+
+The input protocol is a stateful protocol. Events are emitted only when values
+of event codes have changed. However, the state is maintained within the Linux
+input subsystem; drivers do not need to maintain the state and may attempt to
+emit unchanged values without harm. Userspace may obtain the current state of
+event code values using the EVIOCG* ioctls defined in linux/input.h. The event
+reports supported by a device are also provided by sysfs in
+class/input/event*/device/capabilities/, and the properties of a device are
+provided in class/input/event*/device/properties.
+
+Types:
+==========
+Types are groupings of codes under a logical input construct. Each type has a
+set of applicable codes to be used in generating events. See the Codes section
+for details on valid codes for each type.
+
+* EV_SYN:
+ - Used as markers to separate events. Events may be separated in time or in
+ space, such as with the multitouch protocol.
+
+* EV_KEY:
+ - Used to describe state changes of keyboards, buttons, or other key-like
+ devices.
+
+* EV_REL:
+ - Used to describe relative axis value changes, e.g. moving the mouse 5 units
+ to the left.
+
+* EV_ABS:
+ - Used to describe absolute axis value changes, e.g. describing the
+ coordinates of a touch on a touchscreen.
+
+* EV_MSC:
+ - Used to describe miscellaneous input data that do not fit into other types.
+
+* EV_SW:
+ - Used to describe binary state input switches.
+
+* EV_LED:
+ - Used to turn LEDs on devices on and off.
+
+* EV_SND:
+ - Used to output sound to devices.
+
+* EV_REP:
+ - Used for autorepeating devices.
+
+* EV_FF:
+ - Used to send force feedback commands to an input device.
+
+* EV_PWR:
+ - A special type for power button and switch input.
+
+* EV_FF_STATUS:
+ - Used to receive force feedback device status.
+
+Codes:
+==========
+Codes define the precise type of event.
+
+EV_SYN:
+----------
+EV_SYN event values are undefined. Their usage is defined only by when they are
+sent in the evdev event stream.
+
+* SYN_REPORT:
+ - Used to synchronize and separate events into packets of input data changes
+ occurring at the same moment in time. For example, motion of a mouse may set
+ the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
+ motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
+
+* SYN_CONFIG:
+ - TBD
+
+* SYN_MT_REPORT:
+ - Used to synchronize and separate touch events. See the
+ multi-touch-protocol.txt document for more information.
+
+* SYN_DROPPED:
+ - Used to indicate buffer overrun in the evdev client's event queue.
+ Client should ignore all events up to and including next SYN_REPORT
+ event and query the device (using EVIOCG* ioctls) to obtain its
+ current state.
+
+EV_KEY:
+----------
+EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
+to represent the 'A' key on a keyboard. When a key is depressed, an event with
+the key's code is emitted with value 1. When the key is released, an event is
+emitted with value 0. Some hardware sends events when a key is repeated. These
+events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
+BTN_<name> is used for other types of momentary switch events.
+
+A few EV_KEY codes have special meanings:
+
+* BTN_TOOL_<name>:
+ - These codes are used in conjunction with input trackpads, tablets, and
+ touchscreens. These devices may be used with fingers, pens, or other tools.
+ When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
+ code should be set to a value of 1. When the tool is no longer interacting
+ with the input device, the BTN_TOOL_<name> code should be reset to 0. All
+ trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
+ code when events are generated.
+
+* BTN_TOUCH:
+ BTN_TOUCH is used for touch contact. While an input tool is determined to be
+ within meaningful physical contact, the value of this property must be set
+ to 1. Meaningful physical contact may mean any contact, or it may mean
+ contact conditioned by an implementation defined property. For example, a
+ touchpad may set the value to 1 only when the touch pressure rises above a
+ certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
+ example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
+ pen is hovering over but not touching the tablet surface.
+
+Note: For appropriate function of the legacy mousedev emulation driver,
+BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
+
+Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
+interpreted as a touchpad by userspace, while a similar device without
+BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
+with current userspace it is recommended to follow this distinction. In the
+future, this distinction will be deprecated and the device properties ioctl
+EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
+
+* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
+ - These codes denote one, two, three, and four finger interaction on a
+ trackpad or touchscreen. For example, if the user uses two fingers and moves
+ them on the touchpad in an effort to scroll content on screen,
+ BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
+ Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
+ purpose. A trackpad event generated by finger touches should generate events
+ for one code from each group. At most only one of these BTN_TOOL_<name>
+ codes should have a value of 1 during any synchronization frame.
+
+Note: Historically some drivers emitted multiple of the finger count codes with
+a value of 1 in the same synchronization frame. This usage is deprecated.
+
+Note: In multitouch drivers, the input_mt_report_finger_count() function should
+be used to emit these codes. Please see multi-touch-protocol.txt for details.
+
+EV_REL:
+----------
+EV_REL events describe relative changes in a property. For example, a mouse may
+move to the left by a certain number of units, but its absolute position in
+space is unknown. If the absolute position is known, EV_ABS codes should be used
+instead of EV_REL codes.
+
+A few EV_REL codes have special meanings:
+
+* REL_WHEEL, REL_HWHEEL:
+ - These codes are used for vertical and horizontal scroll wheels,
+ respectively.
+
+EV_ABS:
+----------
+EV_ABS events describe absolute changes in a property. For example, a touchpad
+may emit coordinates for a touch location.
+
+A few EV_ABS codes have special meanings:
+
+* ABS_DISTANCE:
+ - Used to describe the distance of a tool from an interaction surface. This
+ event should only be emitted while the tool is hovering, meaning in close
+ proximity of the device and while the value of the BTN_TOUCH code is 0. If
+ the input device may be used freely in three dimensions, consider ABS_Z
+ instead.
+
+* ABS_MT_<name>:
+ - Used to describe multitouch input events. Please see
+ multi-touch-protocol.txt for details.
+
+EV_SW:
+----------
+EV_SW events describe stateful binary switches. For example, the SW_LID code is
+used to denote when a laptop lid is closed.
+
+Upon binding to a device or resuming from suspend, a driver must report
+the current switch state. This ensures that the device, kernel, and userspace
+state is in sync.
+
+Upon resume, if the switch state is the same as before suspend, then the input
+subsystem will filter out the duplicate switch state reports. The driver does
+not need to keep the state of the switch at any time.
+
+EV_MSC:
+----------
+EV_MSC events are used for input and output events that do not fall under other
+categories.
+
+EV_LED:
+----------
+EV_LED events are used for input and output to set and query the state of
+various LEDs on devices.
+
+EV_REP:
+----------
+EV_REP events are used for specifying autorepeating events.
+
+EV_SND:
+----------
+EV_SND events are used for sending sound commands to simple sound output
+devices.
+
+EV_FF:
+----------
+EV_FF events are used to initialize a force feedback capable device and to cause
+such a device to play feedback effects.
+
+EV_PWR:
+----------
+EV_PWR events are a special type of event used specifically for power
+management. Their usage is not well defined. To be addressed later.
+
+Guidelines:
+==========
+The guidelines below ensure proper single-touch and multi-finger functionality.
+For multi-touch functionality, see the multi-touch-protocol.txt document for
+more information.
+
+Mice:
+----------
+REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
+the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
+further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
+scroll wheel events where available.
+
+Touchscreens:
+----------
+ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
+used to report when a touch is active on the screen.
+BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
+contact. BTN_TOOL_<name> events should be reported where possible.
+
+Trackpads:
+----------
+Legacy trackpads that only provide relative position information must report
+events like mice described above.
+
+Trackpads that provide absolute touch position must report ABS_{X,Y} for the
+location of the touch. BTN_TOUCH should be used to report when a touch is active
+on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
+be used to report the number of touches active on the trackpad.
+
+Tablets:
+----------
+BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
+the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
+should be used to report when the tool is in contact with the tablet.
+BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
+button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
+BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
+meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
+purpose on the device.
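As a rough illustration of the type/code/value triplets and SYN_REPORT packet
boundaries described in this document, the following user-space sketch (not
part of the patch) reads raw events from an evdev node; the device path is a
placeholder for whatever device is being inspected.

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0)
		return 1;

	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_SYN && ev.code == SYN_REPORT)
			printf("---- end of packet ----\n");
		else
			printf("type %u code %u value %d\n",
			       (unsigned int)ev.type,
			       (unsigned int)ev.code, ev.value);
	}
	close(fd);
	return 0;
}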
diff --git a/Documentation/md.txt b/Documentation/md.txt
index a81c7b4790f2..2366b1c8cf19 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -552,6 +552,16 @@ also have
within the array where IO will be blocked. This is currently
only supported for raid4/5/6.
+ sync_min
+ sync_max
+ The two values, given as numbers of sectors, indicate a range
+ within the array where 'check'/'repair' will operate. Must be
+ a multiple of chunk_size. When it reaches "sync_max" it will
+ pause, rather than complete.
+ You can use 'select' or 'poll' on "sync_completed" to wait for
+ that number to reach sync_max. Then you can either increase
+ "sync_max", or can write 'idle' to "sync_action".
+
Each active md device may also have attributes specific to the
personality module that manages it.
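A minimal user-space sketch of the sync_min/sync_max usage described above,
assuming an array named md0 and an arbitrary sector range; both are
placeholders, and sync_max must stay a multiple of the chunk size.

#include <stdio.h>

static int write_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/md0/md/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	fclose(f);
	return 0;
}

int main(void)
{
	write_attr("sync_min", "0");
	write_attr("sync_max", "1048576");	/* pause after 1048576 sectors */
	write_attr("sync_action", "check");
	/*
	 * ... wait for sync_completed to reach sync_max (e.g. with select()
	 * or poll()), then either raise sync_max or, as below, write 'idle'
	 * to sync_action, as the text above describes.
	 */
	write_attr("sync_action", "idle");
	return 0;
}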
diff --git a/Documentation/sound/alsa/SB-Live-mixer.txt b/Documentation/sound/alsa/SB-Live-mixer.txt
index f5639d40521d..f4b5988f450c 100644
--- a/Documentation/sound/alsa/SB-Live-mixer.txt
+++ b/Documentation/sound/alsa/SB-Live-mixer.txt
@@ -87,14 +87,14 @@ accumulator. ALSA uses accumulators 0 and 1 for left and right PCM.
The result is forwarded to the ADC capture FIFO (thus to the standard capture
PCM device).
-name='Music Playback Volume',index=0
+name='Synth Playback Volume',index=0
This control is used to attenuate samples for left and right MIDI FX-bus
accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
-name='Music Capture Volume',index=0
-name='Music Capture Switch',index=0
+name='Synth Capture Volume',index=0
+name='Synth Capture Switch',index=0
These controls are used to attenuate samples for left and right MIDI FX-bus
accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
diff --git a/MAINTAINERS b/MAINTAINERS
index 6b4b9cdec370..13803127b68f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -151,6 +151,7 @@ S: Maintained
F: drivers/net/hamradio/6pack.c
8169 10/100/1000 GIGABIT ETHERNET DRIVER
+M: Realtek linux nic maintainers <nic_swsd@realtek.com>
M: Francois Romieu <romieu@fr.zoreil.com>
L: netdev@vger.kernel.org
S: Maintained
@@ -184,10 +185,9 @@ F: Documentation/filesystems/9p.txt
F: fs/9p/
A2232 SERIAL BOARD DRIVER
-M: Enver Haase <A2232@gmx.net>
L: linux-m68k@lists.linux-m68k.org
-S: Maintained
-F: drivers/char/ser_a2232*
+S: Orphan
+F: drivers/staging/generic_serial/ser_a2232*
AACRAID SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
@@ -877,6 +877,13 @@ F: arch/arm/mach-mv78xx0/
F: arch/arm/mach-orion5x/
F: arch/arm/plat-orion/
+ARM/Orion SoC/Technologic Systems TS-78xx platform support
+M: Alexander Clouter <alex@digriz.org.uk>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.digriz.org.uk/ts78xx/kernel
+S: Maintained
+F: arch/arm/mach-orion5x/ts78xx-*
+
ARM/MIOA701 MACHINE SUPPORT
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1063,7 +1070,7 @@ F: arch/arm/mach-shmobile/
F: drivers/sh/
ARM/TELECHIPS ARM ARCHITECTURE
-M: "Hans J. Koch" <hjk@linutronix.de>
+M: "Hans J. Koch" <hjk@hansjkoch.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/plat-tcc/
@@ -1823,11 +1830,10 @@ S: Maintained
F: drivers/platform/x86/compal-laptop.c
COMPUTONE INTELLIPORT MULTIPORT CARD
-M: "Michael H. Warfield" <mhw@wittsend.com>
W: http://www.wittsend.com/computone.html
-S: Maintained
+S: Orphan
F: Documentation/serial/computone.txt
-F: drivers/char/ip2/
+F: drivers/staging/tty/ip2/
CONEXANT ACCESSRUNNER USB DRIVER
M: Simon Arlott <cxacru@fire.lp0.eu>
@@ -2010,7 +2016,7 @@ F: drivers/net/wan/cycx*
CYCLADES ASYNC MUX DRIVER
W: http://www.cyclades.com/
S: Orphan
-F: drivers/char/cyclades.c
+F: drivers/tty/cyclades.c
F: include/linux/cyclades.h
CYCLADES PC300 DRIVER
@@ -2124,8 +2130,8 @@ L: Eng.Linux@digi.com
W: http://www.digi.com
S: Orphan
F: Documentation/serial/digiepca.txt
-F: drivers/char/epca*
-F: drivers/char/digi*
+F: drivers/staging/tty/epca*
+F: drivers/staging/tty/digi*
DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck <guenter.roeck@ericsson.com>
@@ -4077,7 +4083,7 @@ F: drivers/video/matrox/matroxfb_*
F: include/linux/matroxfb.h
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
-M: "Hans J. Koch" <hjk@linutronix.de>
+M: "Hans J. Koch" <hjk@hansjkoch.de>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/max6650
@@ -4192,7 +4198,7 @@ MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
M: Jiri Slaby <jirislaby@gmail.com>
S: Maintained
F: Documentation/serial/moxa-smartio
-F: drivers/char/mxser.*
+F: drivers/tty/mxser.*
MSI LAPTOP SUPPORT
M: "Lee, Chun-Yi" <jlee@novell.com>
@@ -4234,7 +4240,7 @@ F: sound/oss/msnd*
MULTITECH MULTIPORT CARD (ISICOM)
S: Orphan
-F: drivers/char/isicom.c
+F: drivers/tty/isicom.c
F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
@@ -5273,14 +5279,14 @@ F: drivers/memstick/host/r592.*
RISCOM8 DRIVER
S: Orphan
F: Documentation/serial/riscom8.txt
-F: drivers/char/riscom8*
+F: drivers/staging/tty/riscom8*
ROCKETPORT DRIVER
P: Comtrol Corp.
W: http://www.comtrol.com
S: Maintained
F: Documentation/serial/rocket.txt
-F: drivers/char/rocket*
+F: drivers/tty/rocket*
ROSE NETWORK LAYER
M: Ralf Baechle <ralf@linux-mips.org>
@@ -5390,7 +5396,7 @@ F: drivers/media/video/*7146*
F: include/media/*7146*
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Jassi Brar <jassi.brar@samsung.com>
+M: Jassi Brar <jassisinghbrar@gmail.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/samsung
@@ -5916,10 +5922,9 @@ F: arch/arm/mach-spear6xx/spear600.c
F: arch/arm/mach-spear6xx/spear600_evb.c
SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
-M: Roger Wolff <R.E.Wolff@BitWizard.nl>
-S: Supported
+S: Orphan
F: Documentation/serial/specialix.txt
-F: drivers/char/specialix*
+F: drivers/staging/tty/specialix*
SPI SUBSYSTEM
M: David Brownell <dbrownell@users.sourceforge.net>
@@ -5964,7 +5969,6 @@ F: arch/alpha/kernel/srm_env.c
STABLE BRANCH
M: Greg Kroah-Hartman <greg@kroah.com>
-M: Chris Wright <chrisw@sous-sol.org>
L: stable@kernel.org
S: Maintained
@@ -6248,7 +6252,8 @@ M: Greg Ungerer <gerg@uclinux.org>
W: http://www.uclinux.org/
L: uclinux-dev@uclinux.org (subscribers-only)
S: Maintained
-F: arch/m68knommu/
+F: arch/m68k/*/*_no.*
+F: arch/m68k/include/asm/*_no.*
UCLINUX FOR RENESAS H8/300 (H8300)
M: Yoshinori Sato <ysato@users.sourceforge.jp>
@@ -6618,7 +6623,7 @@ F: fs/hostfs/
F: fs/hppfs/
USERSPACE I/O (UIO)
-M: "Hans J. Koch" <hjk@linutronix.de>
+M: "Hans J. Koch" <hjk@hansjkoch.de>
M: Greg Kroah-Hartman <gregkh@suse.de>
S: Maintained
F: Documentation/DocBook/uio-howto.tmpl
@@ -6916,6 +6921,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
S: Maintained
F: drivers/platform/x86
+XEN NETWORK BACKEND DRIVER
+M: Ian Campbell <ian.campbell@citrix.com>
+L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/xen-netback/*
+
XEN PCI SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: xen-devel@lists.xensource.com (moderated for non-subscribers)
diff --git a/Makefile b/Makefile
index 8392b64079df..b967b967572b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 9bb7b858ed23..7a6d908bb865 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -4,7 +4,7 @@
extra-y := head.o vmlinux.lds
asflags-y := $(KBUILD_CFLAGS)
-ccflags-y := -Werror -Wno-sign-compare
+ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 381fec0af52e..da7bcc372f16 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1,
{
unsigned long flags;
unsigned long mid = MCPCIA_HOSE2MID(hose->index);
- unsigned int stat0, value, temp, cpu;
+ unsigned int stat0, value, cpu;
cpu = smp_processor_id();
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1,
stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
*(vuip)MCPCIA_CAP_ERR(mid) = stat0;
mb();
- temp = *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid);
DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
mb();
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
{
unsigned long flags;
unsigned long mid = MCPCIA_HOSE2MID(hose->index);
- unsigned int stat0, temp, cpu;
+ unsigned int stat0, cpu;
cpu = smp_processor_id();
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
/* Reset status register to avoid losing errors. */
stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
*(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
- temp = *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid);
DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
draina();
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
*((vuip)addr) = value;
mb();
mb(); /* magic */
- temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
+ *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
mcheck_expected(cpu) = 0;
mb();
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr)
void
mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
{
- struct el_common *mchk_header;
struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
unsigned int cpu = smp_processor_id();
int expected;
- mchk_header = (struct el_common *)la_ptr;
mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
expected = mcheck_expected(cpu);
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index c3b3781a03de..14b26c466c89 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = {
static struct el_subpacket *
el_process_regatta_subpacket(struct el_subpacket *header)
{
- int status;
-
if (header->class != EL_CLASS__REGATTA_FAMILY) {
printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
err_print_prefix,
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header)
printk("%s ** Occurred on CPU %d:\n",
err_print_prefix,
(int)header->by_type.regatta_frame.cpuid);
- status = privateer_process_logout_frame((struct el_common *)
+ privateer_process_logout_frame((struct el_common *)
header->by_type.regatta_frame.data_start, 1);
break;
default:
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 1479dc6ebd97..51b7fbd9e4c1 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
void __init
init_rtc_irq(void)
{
- irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+ irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
handle_simple_irq, "RTC");
setup_irq(RTC_IRQ, &timer_irqaction);
}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index d2634e4476b4..edbddcbd5bc6 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type)
case PCA56_CPU:
case PCA57_CPU:
{
- unsigned long cbox_config, size;
-
if (cpu_type == PCA56_CPU) {
L1I = CSHAPE(16*1024, 6, 1);
L1D = CSHAPE(8*1024, 5, 1);
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type)
}
L3 = -1;
+#if 0
+ unsigned long cbox_config, size;
+
cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
-#if 0
L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
#else
L2 = external_cache_probe(512*1024, 6);
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 3e6a2893af9f..6886b834f487 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -79,7 +79,6 @@
static unsigned long __init SMCConfigState(unsigned long baseAddr)
{
unsigned char devId;
- unsigned char devRev;
unsigned long configPort;
unsigned long indexPort;
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr)
devId = inb(dataPort);
if (devId == VALID_DEVICE_ID) {
outb(DEVICE_REV, indexPort);
- devRev = inb(dataPort);
+ /* unsigned char devRev = */ inb(dataPort);
break;
}
else
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index d3cb28bb8eb0..d92cdc715c65 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -156,7 +156,6 @@ static void __init
wildfire_init_irq_per_pca(int qbbno, int pcano)
{
int i, irq_bias;
- unsigned long io_bias;
static struct irqaction isa_enable = {
.handler = no_action,
.name = "isa_enable",
@@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
+ pcano * WILDFIRE_IRQ_PER_PCA;
+#if 0
+ unsigned long io_bias;
+
/* Only need the following for first PCI bus per PCA. */
io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
-#if 0
outb(0, DMA1_RESET_REG + io_bias);
outb(0, DMA2_RESET_REG + io_bias);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index a58e84f1a63b..918e8e0b72ff 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts)
year += 100;
ts->tv_sec = mktime(year, mon, day, hour, min, sec);
+ ts->tv_nsec = 0;
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fdc9d4dbf85b..377a7a595b08 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1540,7 +1540,6 @@ config HIGHMEM
config HIGHPTE
bool "Allocate 2nd-level pagetables from highmem"
depends on HIGHMEM
- depends on !OUTER_CACHE
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
@@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
depends on !ARCH_S5P64X0 && !ARCH_S5P6442
+ depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+ CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
def_bool y
endmenu
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 494224a9b459..03d01d783e3b 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,17 +63,6 @@ config DEBUG_USER
8 - SIGSEGV faults
16 - SIGBUS faults
-config DEBUG_ERRORS
- bool "Verbose kernel error messages"
- depends on DEBUG_KERNEL
- help
- This option controls verbose debugging information which can be
- printed when the kernel detects an internal error. This debugging
- information is useful to kernel hackers when tracking down problems,
- but mostly meaningless to other people. It's safe to say Y unless
- you are concerned with the code size or don't want to see these
- messages.
-
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index e7521bca2c35..6ea9b6f3607a 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
-obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ed5bc9e05a4e..cd4458f64171 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -2,6 +2,7 @@
#define __ASM_ARM_CPUTYPE_H
#include <linux/stringify.h>
+#include <linux/kernel.h>
#define CPUID_ID 0
#define CPUID_CACHETYPE 1
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index c4391ba20350..1dc980675894 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
#define THREAD_NOTIFY_FLUSH 0
#define THREAD_NOTIFY_EXIT 1
#define THREAD_NOTIFY_SWITCH 2
+#define THREAD_NOTIFY_COPY 3
#endif
#endif
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c891eb76c0e3..87dbe3e21970 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -396,6 +396,10 @@
#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
+#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370)
+#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
+#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
+#define __NR_syncfs (__NR_SYSCALL_BASE+373)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 74554f1742d7..8d95446150a3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
-obj-$(CONFIG_PM) += sleep.o
+obj-$(CONFIG_PM_SLEEP) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5c26eccef998..7fbf28c35bb2 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -379,6 +379,10 @@
CALL(sys_fanotify_init)
CALL(sys_fanotify_mark)
CALL(sys_prlimit64)
+/* 370 */ CALL(sys_name_to_handle_at)
+ CALL(sys_open_by_handle_at)
+ CALL(sys_clock_adjtime)
+ CALL(sys_syncfs)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f4..9b05c6a0dcea 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
void elf_set_personality(const struct elf32_hdr *x)
{
unsigned int eflags = x->e_flags;
- unsigned int personality = PER_LINUX_32BIT;
+ unsigned int personality = current->personality & ~PER_MASK;
+
+ /*
+ * We only support Linux ELF executables, so always set the
+ * personality to LINUX.
+ */
+ personality |= PER_LINUX;
/*
* APCS-26 is only valid for OABI executables
*/
- if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
- if (eflags & EF_ARM_APCS_26)
- personality = PER_LINUX;
- }
+ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+ (eflags & EF_ARM_APCS_26))
+ personality &= ~ADDR_LIMIT_32BIT;
+ else
+ personality |= ADDR_LIMIT_32BIT;
set_personality(personality);
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 8dbc126f7152..87acc25d7a3e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
*/
asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
isb();
+
+ /*
+ * Clear any configured vector-catch events before
+ * enabling monitor mode.
+ */
+ asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+ isb();
}
if (enable_monitor_mode())
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 69cfee0fe00f..979da3947f42 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -221,7 +221,7 @@ again:
prev_raw_count &= armpmu->max_period;
if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count;
+ delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
else
delta = new_raw_count - prev_raw_count;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed639..5e1e54197227 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (clone_flags & CLONE_SETTLS)
thread->tp_value = regs->ARM_r3;
+ thread_notify(THREAD_NOTIFY_COPY, thread);
+
return 0;
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f0000e188c8c..3b54ad19d489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
struct thread_info *thread = current_thread_info();
siginfo_t info;
- if (current->personality != PER_LINUX &&
- current->personality != PER_LINUX_32BIT &&
+ if ((current->personality & PER_MASK) != PER_LINUX &&
thread->exec_domain->handler) {
thread->exec_domain->handler(n, regs);
return regs->ARM_r0;
diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h
index ee8b02ed8011..7bfb827f3fe3 100644
--- a/arch/arm/mach-mmp/include/mach/gpio.h
+++ b/arch/arm/mach-mmp/include/mach/gpio.h
@@ -10,7 +10,7 @@
#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
#define GPIO_REG(x) (*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
-#define NR_BUILTIN_GPIO (192)
+#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
#define gpio_to_bank(gpio) ((gpio) >> 5)
#define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio))
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
index 4621067c7720..713be155a44d 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
@@ -8,6 +8,15 @@
#define MFP_DRIVE_MEDIUM (0x2 << 13)
#define MFP_DRIVE_FAST (0x3 << 13)
+#undef MFP_CFG
+#undef MFP_CFG_DRV
+
+#define MFP_CFG(pin, af) \
+ (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM)
+
+#define MFP_CFG_DRV(pin, af, drv) \
+ (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv)
+
/* GPIO */
#define GPIO0_GPIO MFP_CFG(GPIO0, AF5)
#define GPIO1_GPIO MFP_CFG(GPIO1, AF5)
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 7f568611547e..6a96911b0ad5 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
static void __init qsd8x50_init_mmc(void)
{
- if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa())
- vreg_mmc = vreg_get(NULL, "gp6");
- else
- vreg_mmc = vreg_get(NULL, "gp5");
+ vreg_mmc = vreg_get(NULL, "gp5");
if (IS_ERR(vreg_mmc)) {
pr_err("vreg get for vreg_mmc failed (%ld)\n",
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 56f920c55b6a..38b95e949d13 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
/* Use existing clock_event for cpu 0 */
if (!smp_processor_id())
- return;
+ return 0;
writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h
index b024a8b37439..c4639502efca 100644
--- a/arch/arm/mach-pxa/include/mach/gpio.h
+++ b/arch/arm/mach-pxa/include/mach/gpio.h
@@ -99,11 +99,24 @@
#define GAFR(x) GPIO_REG(0x54 + (((x) & 0x70) >> 2))
-#define NR_BUILTIN_GPIO 128
+#define NR_BUILTIN_GPIO PXA_GPIO_IRQ_NUM
#define gpio_to_bank(gpio) ((gpio) >> 5)
#define gpio_to_irq(gpio) IRQ_GPIO(gpio)
-#define irq_to_gpio(irq) IRQ_TO_GPIO(irq)
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ int gpio;
+
+ if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
+ return irq - IRQ_GPIO0;
+
+ gpio = irq - PXA_GPIO_IRQ_BASE;
+ if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
+ return gpio;
+
+ return -1;
+}
#ifdef CONFIG_CPU_PXA26x
/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index a4285fc00878..038402404e39 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -93,9 +93,6 @@
#define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x))
#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
-#define IRQ_TO_GPIO_2_x(i) ((i) - PXA_GPIO_IRQ_BASE)
-#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
-
/*
* The following interrupts are for board specific purposes. Since
* the kernel can only run on one machine at a time, we can re-use
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 6bde5956358d..a4af8c52d7ee 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {}
static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
{
- int gpio = IRQ_TO_GPIO(d->irq);
+ int gpio = irq_to_gpio(d->irq);
uint32_t mask = 0;
if (gpio >= 0 && gpio < 85)
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 1cb5d0f9723f..909756eaf4b7 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {}
*/
static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
{
- int gpio = IRQ_TO_GPIO(d->irq);
+ int gpio = irq_to_gpio(d->irq);
uint32_t mask;
if (gpio >= 0 && gpio < 128)
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 0db2411ef4bb..716662008ce2 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -409,6 +409,10 @@ struct platform_device s3c24xx_pwm_device = {
.num_resources = 0,
};
+static struct platform_device gta02_dfbmcs320_device = {
+ .name = "dfbmcs320",
+};
+
static struct i2c_board_info gta02_i2c_devs[] __initdata = {
{
I2C_BOARD_INFO("pcf50633", 0x73),
@@ -523,6 +527,7 @@ static struct platform_device *gta02_devices[] __initdata = {
&s3c_device_iis,
&samsung_asoc_dma,
&s3c_device_i2c0,
+ &gta02_dfbmcs320_device,
&gta02_buttons_device,
&s3c_device_adc,
&s3c_device_ts,
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 76a3f654220f..65a1aba6823d 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
void tegra_gpio_resume(void)
{
unsigned long flags;
- int b, p, i;
+ int b;
+ int p;
local_irq_save(flags);
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
void tegra_gpio_suspend(void)
{
unsigned long flags;
- int b, p, i;
+ int b;
+ int p;
local_irq_save(flags);
for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 6d7c4eea4dcb..4459470c052d 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
{
unsigned long flags;
int ret;
+ long new_rate = rate;
- rate = clk_round_rate(c->parent, rate);
- if (rate < 0)
- return rate;
+ new_rate = clk_round_rate(c->parent, new_rate);
+ if (new_rate < 0)
+ return new_rate;
spin_lock_irqsave(&c->parent->spinlock, flags);
- c->u.shared_bus_user.rate = rate;
+ c->u.shared_bus_user.rate = new_rate;
ret = tegra_clk_shared_bus_update(c->parent);
spin_unlock_irqrestore(&c->parent->spinlock, flags);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index af913741e6ec..6e1907fa94f0 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -178,16 +178,15 @@ static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
.irq = NOMADIK_GPIO_TO_IRQ(217),
.platform_data = &mop500_tc35892_data,
},
-};
-
-/* I2C0 devices only available prior to HREFv60 */
-static struct i2c_board_info __initdata mop500_i2c0_old_devices[] = {
+ /* I2C0 devices only available prior to HREFv60 */
{
I2C_BOARD_INFO("tps61052", 0x33),
.platform_data = &mop500_tps61052_data,
},
};
+#define NUM_PRE_V60_I2C0_DEVICES 1
+
static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
{
/* lp5521 LED driver, 1st device */
@@ -425,6 +424,8 @@ static void __init mop500_uart_init(void)
static void __init mop500_init_machine(void)
{
+ int i2c0_devs;
+
/*
* The HREFv60 board removed a GPIO expander and routed
* all these GPIO pins to the internal GPIO controller
@@ -448,11 +449,11 @@ static void __init mop500_init_machine(void)
platform_device_register(&ab8500_device);
- i2c_register_board_info(0, mop500_i2c0_devices,
- ARRAY_SIZE(mop500_i2c0_devices));
- if (!machine_is_hrefv60())
- i2c_register_board_info(0, mop500_i2c0_old_devices,
- ARRAY_SIZE(mop500_i2c0_old_devices));
+ i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
+ if (machine_is_hrefv60())
+ i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
+
+ i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
i2c_register_board_info(2, mop500_i2c2_devices,
ARRAY_SIZE(mop500_i2c2_devices));
}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index afe209e1e1f8..74be05f3e03a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
+#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
mm->cached_hole_size = 0;
}
/* 8 bits of randomness in 20 address space bits */
- if (current->flags & PF_RANDOMIZE)
+ if ((current->flags & PF_RANDOMIZE) &&
+ !(current->personality & ADDR_NO_RANDOMIZE))
addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
full_search:
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index b46eb21f05c7..bf8a1d1cccb6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r7, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 6a4bdb2c94a7..0ed85d930c09 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r7, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 74483d1977fe..184a9c997e36 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4*4
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r7, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bfa0c9f611c5..7c99cb4c8e4f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c35618e42f6f..babfba09c89f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -211,7 +211,7 @@ cpu_v7_name:
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 63d8b2044e84..596213699f37 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 8
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r10, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 086038cd86ab..ce233bcbf506 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 7
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r10, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c
index d592b6304b48..d15dc47b0e3d 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-s5p/pm.c
@@ -19,17 +19,6 @@
#define PFX "s5p pm: "
-/* s3c_pm_check_resume_pin
- *
- * check to see if the pin is configured correctly for sleep mode, and
- * make any necessary adjustments if it is not
-*/
-
-static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
-{
- /* nothing here yet */
-}
-
/* s3c_pm_configure_extint
*
* configure all external interrupt pins
diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c
index e4baf76f374a..6b733fafe7cd 100644
--- a/arch/arm/plat-samsung/pm-check.c
+++ b/arch/arm/plat-samsung/pm-check.c
@@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
*/
static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
{
- void *save_at = phys_to_virt(s3c_sleep_save_phys);
unsigned long addr;
unsigned long left;
void *stkpage;
@@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
goto skip_check;
}
- if (in_region(ptr, left, save_at, 32*4 )) {
- S3C_PMDBG("skipping %08lx, has save block in\n", addr);
- goto skip_check;
- }
-
/* calculate and check the checksum */
calc = crc32_le(~0, ptr, left);
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index d5b58d31903c..5c0a440d6e16 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
*
* print any IRQs asserted at resume time (ie, we woke from)
*/
-static void s3c_pm_show_resume_irqs(int start, unsigned long which,
- unsigned long mask)
+static void __maybe_unused s3c_pm_show_resume_irqs(int start,
+ unsigned long which,
+ unsigned long mask)
{
int i;
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index bbf3da012afd..f74695075e64 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread)
put_cpu();
}
+static void vfp_thread_copy(struct thread_info *thread)
+{
+ struct thread_info *parent = current_thread_info();
+
+ vfp_sync_hwstate(parent);
+ thread->vfpstate = parent->vfpstate;
+}
+
/*
* When this function is called with the following 'cmd's, the following
* is true while this function is being run:
@@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread)
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
struct thread_info *thread = v;
+ u32 fpexc;
+#ifdef CONFIG_SMP
+ unsigned int cpu;
+#endif
- if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
- u32 fpexc = fmrx(FPEXC);
+ switch (cmd) {
+ case THREAD_NOTIFY_SWITCH:
+ fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
- unsigned int cpu = thread->cpu;
+ cpu = thread->cpu;
/*
* On SMP, if VFP is enabled, save the old state in
@@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
* old state.
*/
fmxr(FPEXC, fpexc & ~FPEXC_EN);
- return NOTIFY_DONE;
- }
+ break;
- if (cmd == THREAD_NOTIFY_FLUSH)
+ case THREAD_NOTIFY_FLUSH:
vfp_thread_flush(thread);
- else
+ break;
+
+ case THREAD_NOTIFY_EXIT:
vfp_thread_exit(thread);
+ break;
+
+ case THREAD_NOTIFY_COPY:
+ vfp_thread_copy(thread);
+ break;
+ }
return NOTIFY_DONE;
}
diff --git a/arch/avr32/include/asm/setup.h b/arch/avr32/include/asm/setup.h
index ff5b7cf6be4d..160543dbec7e 100644
--- a/arch/avr32/include/asm/setup.h
+++ b/arch/avr32/include/asm/setup.h
@@ -94,6 +94,13 @@ struct tag_ethernet {
#define ETH_INVALID_PHY 0xff
+/* board information */
+#define ATAG_BOARDINFO 0x54410008
+
+struct tag_boardinfo {
+ u32 board_number;
+};
+
struct tag {
struct tag_header hdr;
union {
@@ -102,6 +109,7 @@ struct tag {
struct tag_cmdline cmdline;
struct tag_clock clock;
struct tag_ethernet ethernet;
+ struct tag_boardinfo boardinfo;
} u;
};
@@ -128,6 +136,7 @@ extern struct tag *bootloader_tags;
extern resource_size_t fbmem_start;
extern resource_size_t fbmem_size;
+extern u32 board_number;
void setup_processor(void);
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 5c7083916c33..bb0974cce4ac 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -391,6 +391,21 @@ static int __init parse_tag_clock(struct tag *tag)
__tagtable(ATAG_CLOCK, parse_tag_clock);
/*
+ * The board_number corresponds to the bd->bi_board_number in U-Boot. This
+ * parameter is only available during initialisation and can be used for some
+ * kind of board identification.
+ */
+u32 __initdata board_number;
+
+static int __init parse_tag_boardinfo(struct tag *tag)
+{
+ board_number = tag->u.boardinfo.board_number;
+
+ return 0;
+}
+__tagtable(ATAG_BOARDINFO, parse_tag_boardinfo);
+
+/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
* declarations.
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index b91b2044af9c..7aa25756412f 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -95,28 +95,6 @@ void _exception(long signr, struct pt_regs *regs, int code,
info.si_code = code;
info.si_addr = (void __user *)addr;
force_sig_info(signr, &info, current);
-
- /*
- * Init gets no signals that it doesn't have a handler for.
- * That's all very well, but if it has caused a synchronous
- * exception and we ignore the resulting signal, it will just
- * generate the same exception over and over again and we get
- * nowhere. Better to kill it and let the kernel panic.
- */
- if (is_global_init(current)) {
- __sighandler_t handler;
-
- spin_lock_irq(&current->sighand->siglock);
- handler = current->sighand->action[signr-1].sa.sa_handler;
- spin_unlock_irq(&current->sighand->siglock);
- if (handler == SIG_DFL) {
- /* init has generated a synchronous exception
- and it doesn't have a handler for the signal */
- printk(KERN_CRIT "init has generated signal %ld "
- "but has no handler for it\n", signr);
- do_exit(signr);
- }
- }
}
asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 442f08c5e641..86925fd6ea5b 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -35,22 +35,30 @@ void at32_clk_register(struct clk *clk)
spin_unlock(&clk_list_lock);
}
-struct clk *clk_get(struct device *dev, const char *id)
+static struct clk *__clk_get(struct device *dev, const char *id)
{
struct clk *clk;
- spin_lock(&clk_list_lock);
-
list_for_each_entry(clk, &at32_clock_list, list) {
if (clk->dev == dev && strcmp(id, clk->name) == 0) {
- spin_unlock(&clk_list_lock);
return clk;
}
}
- spin_unlock(&clk_list_lock);
return ERR_PTR(-ENOENT);
}
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ struct clk *clk;
+
+ spin_lock(&clk_list_lock);
+ clk = __clk_get(dev, id);
+ spin_unlock(&clk_list_lock);
+
+ return clk;
+}
+
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
@@ -257,15 +265,15 @@ static int clk_show(struct seq_file *s, void *unused)
spin_lock(&clk_list_lock);
/* show clock tree as derived from the three oscillators */
- clk = clk_get(NULL, "osc32k");
+ clk = __clk_get(NULL, "osc32k");
dump_clock(clk, &r);
clk_put(clk);
- clk = clk_get(NULL, "osc0");
+ clk = __clk_get(NULL, "osc0");
dump_clock(clk, &r);
clk_put(clk);
- clk = clk_get(NULL, "osc1");
+ clk = __clk_get(NULL, "osc1");
dump_clock(clk, &r);
clk_put(clk);
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 47ba4b9b6db1..fbc2aeaebddb 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -61,34 +61,34 @@ struct eic {
static struct eic *nmi_eic;
static bool nmi_enabled;
-static void eic_ack_irq(struct irq_chip *d)
+static void eic_ack_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
}
-static void eic_mask_irq(struct irq_chip *d)
+static void eic_mask_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
}
-static void eic_mask_ack_irq(struct irq_chip *d)
+static void eic_mask_ack_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq));
eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq));
}
-static void eic_unmask_irq(struct irq_chip *d)
+static void eic_unmask_irq(struct irq_data *d)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
eic_writel(eic, IER, 1 << (d->irq - eic->first_irq));
}
-static int eic_set_irq_type(struct irq_chip *d, unsigned int flow_type)
+static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- struct eic *eic = irq_data_get_irq_chip_data(data);
+ struct eic *eic = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq;
unsigned int i = irq - eic->first_irq;
u32 mode, edge, level;
@@ -191,7 +191,7 @@ static int __init eic_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int_irq = platform_get_irq(pdev, 0);
- if (!regs || !int_irq) {
+ if (!regs || (int)int_irq <= 0) {
dev_dbg(&pdev->dev, "missing regs and/or irq resource\n");
return -ENXIO;
}
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index f308e1ddc629..2e0aa853a4bc 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -257,7 +257,7 @@ static void gpio_irq_mask(struct irq_data *d)
pio_writel(pio, IDR, 1 << (gpio & 0x1f));
}
-static void gpio_irq_unmask(struct irq_data *d))
+static void gpio_irq_unmask(struct irq_data *d)
{
unsigned gpio = irq_to_gpio(d->irq);
struct pio_device *pio = &pio_dev[gpio >> 5];
diff --git a/arch/avr32/mach-at32ap/pm-at32ap700x.S b/arch/avr32/mach-at32ap/pm-at32ap700x.S
index 17503b0ed6c9..f868f4ce761b 100644
--- a/arch/avr32/mach-at32ap/pm-at32ap700x.S
+++ b/arch/avr32/mach-at32ap/pm-at32ap700x.S
@@ -53,7 +53,7 @@ cpu_enter_idle:
st.w r8[TI_flags], r9
unmask_interrupts
sleep CPU_SLEEP_IDLE
- .size cpu_idle_sleep, . - cpu_idle_sleep
+ .size cpu_enter_idle, . - cpu_enter_idle
/*
* Common return path for PM functions that don't run from
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 19e2c7c3e63a..44bd0cced725 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -19,11 +19,11 @@
* Force strict CPU ordering.
*/
#define nop() __asm__ __volatile__ ("nop;\n\t" : : )
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() __asm__ __volatile__ ("" : : : "memory")
-#define wmb() __asm__ __volatile__ ("" : : : "memory")
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define read_barrier_depends() do { } while(0)
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define smp_read_barrier_depends() read_barrier_depends()
#ifdef CONFIG_SMP
asmlinkage unsigned long __raw_xchg_1_asm(volatile void *ptr, unsigned long value);
@@ -37,16 +37,16 @@ asmlinkage unsigned long __raw_cmpxchg_4_asm(volatile void *ptr,
unsigned long new, unsigned long old);
#ifdef __ARCH_SYNC_CORE_DCACHE
-# define smp_mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
-# define smp_rmb() do { barrier(); smp_check_barrier(); } while (0)
-# define smp_wmb() do { barrier(); smp_mark_barrier(); } while (0)
-#define smp_read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
-
+/* Force Core data cache coherence */
+# define mb() do { barrier(); smp_check_barrier(); smp_mark_barrier(); } while (0)
+# define rmb() do { barrier(); smp_check_barrier(); } while (0)
+# define wmb() do { barrier(); smp_mark_barrier(); } while (0)
+# define read_barrier_depends() do { barrier(); smp_check_barrier(); } while (0)
#else
-# define smp_mb() barrier()
-# define smp_rmb() barrier()
-# define smp_wmb() barrier()
-#define smp_read_barrier_depends() barrier()
+# define mb() barrier()
+# define rmb() barrier()
+# define wmb() barrier()
+# define read_barrier_depends() do { } while (0)
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
@@ -99,10 +99,10 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#else /* !CONFIG_SMP */
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() do { } while (0)
struct __xchg_dummy {
unsigned long a[100];
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index cdbe075de1dc..8b81dc04488a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
_disable_gptimers(mask);
for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
if (mask & (1 << i))
- group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+ group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
SSYNC();
}
EXPORT_SYMBOL(disable_gptimers);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 8c9a43daf80f..cdb4beb6bc8f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -206,8 +206,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
smp_mb();
- evt->event_handler(evt);
+ /*
+ * We want to ACK before we handle so that we can handle smaller timer
+ * intervals. This way, if the timer expires again while we're handling
+ * things, we're more likely to see that second interrupt rather than
+ * swallowing it by ACKing the interrupt at the end of this handler.
+ */
bfin_gptmr0_ack();
+ evt->event_handler(evt);
return IRQ_HANDLED;
}
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 6e17a265c4d3..8bce5ed031e4 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -109,10 +109,23 @@ static void ipi_flush_icache(void *info)
struct blackfin_flush_data *fdata = info;
/* Invalidate the memory holding the bounds of the flushed region. */
- invalidate_dcache_range((unsigned long)fdata,
- (unsigned long)fdata + sizeof(*fdata));
+ blackfin_dcache_invalidate_range((unsigned long)fdata,
+ (unsigned long)fdata + sizeof(*fdata));
+
+ /* Make sure all write buffers in the data side of the core
+ * are flushed before trying to invalidate the icache. This
+ * needs to be after the data flush and before the icache
+ * flush so that the SSYNC does the right thing in preventing
+ * the instruction prefetcher from hitting things in cached
+ * memory at the wrong time -- it runs much further ahead than
+ * the pipeline.
+ */
+ SSYNC();
- flush_icache_range(fdata->start, fdata->end);
+ /* ipi_flush_icache is invoked by generic flush_icache_range,
+ * so call blackfin arch icache flush directly here.
+ */
+ blackfin_icache_flush_range(fdata->start, fdata->end);
}
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 26d851d385bb..29e17907d9f2 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -343,10 +343,14 @@
#define __NR_fanotify_init 337
#define __NR_fanotify_mark 338
#define __NR_prlimit64 339
+#define __NR_name_to_handle_at 340
+#define __NR_open_by_handle_at 341
+#define __NR_clock_adjtime 342
+#define __NR_syncfs 343
#ifdef __KERNEL__
-#define NR_syscalls 340
+#define NR_syscalls 344
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
index 1559dea36e55..1359ee659574 100644
--- a/arch/m68k/kernel/entry_mm.S
+++ b/arch/m68k/kernel/entry_mm.S
@@ -750,4 +750,8 @@ sys_call_table:
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
+ .long sys_name_to_handle_at /* 340 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 79b1ed198c07..9b8393d8adb8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -358,6 +358,10 @@ ENTRY(sys_call_table)
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
+ .long sys_name_to_handle_at /* 340 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
+ .long sys_syncfs
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 851b3bf6e962..eccdefe70d4e 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,7 +6,6 @@ config MICROBLAZE
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
- select USB_ARCH_HAS_EHCI
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_OPROFILE
select HAVE_ARCH_KGDB
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b6ff882f695b..8f4d50b0adfa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_SUSPEND_POSSIBLE
def_bool y
depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
- PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
+ (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
config PPC_DCR_NATIVE
bool
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index be3cdf9134ce..1833d1a07e79 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL)
+#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500)
+#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
+#endif
#else
enum {
CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
+ CPU_FTRS_E5500 |
#endif
0,
};
#endif /* __powerpc64__ */
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_BOOK3E
+#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500)
+#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
+#endif
#else
enum {
CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
#endif
#ifdef CONFIG_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
+ CPU_FTRS_E5500 &
#endif
CPU_FTRS_POSSIBLE,
};
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 811f04ac3660..8d1569c29042 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
* on platforms where such control is possible.
*/
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES)
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..b9602ee06deb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_mask = 0xffff0000,
.pvr_value = 0x80240000,
.cpu_name = "e5500",
- .cpu_features = CPU_FTRS_E500MC,
+ .cpu_features = CPU_FTRS_E5500,
.cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
MMU_FTR_USE_TLBILX,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d3d416339dd..5b5e1f002a8e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
}
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
-#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP)
+#ifdef CONFIG_PPC_STD_MMU_64
static void crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
}
mb();
}
-#else
-static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
/*
* This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
crash_ipi_callback(regs);
}
-#else
+#else /* ! CONFIG_SMP */
+static inline void crash_kexec_wait_realmode(int cpu) {}
+
static void crash_kexec_prepare_cpus(int cpu)
{
/*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
{
cpus_in_sr = CPU_MASK_NONE;
}
-#endif
+#endif /* CONFIG_SMP */
/*
* Register a function to be called on shutdown. Only use this if you
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index c00d4ca1ee15..28581f1ad2c0 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -527,7 +527,7 @@ static int ibmebus_bus_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int ibmebus_bus_pm_freeze(struct device *dev)
{
@@ -665,7 +665,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define ibmebus_bus_pm_freeze NULL
#define ibmebus_bus_pm_thaw NULL
@@ -676,7 +676,7 @@ static int ibmebus_bus_pm_restore_noirq(struct device *dev)
#define ibmebus_bus_pm_poweroff_noirq NULL
#define ibmebus_bus_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
.prepare = ibmebus_bus_pm_prepare,
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
if (!parent)
continue;
if (of_match_node(legacy_serial_parents, parent) != NULL) {
- index = add_legacy_soc_port(np, np);
- if (index >= 0 && np == stdout)
- legacy_serial_console = index;
+ if (of_device_is_available(np)) {
+ index = add_legacy_soc_port(np, np);
+ if (index >= 0 && np == stdout)
+ legacy_serial_console = index;
+ }
}
of_node_put(parent);
}
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c4063b7f49a0..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
return 0;
}
+static u64 check_and_compute_delta(u64 prev, u64 val)
+{
+ u64 delta = (val - prev) & 0xfffffffful;
+
+ /*
+ * POWER7 can roll back counter values; if the new value is smaller
+ * than the previous value it will cause the delta and the counter to
+ * have bogus values unless we rolled a counter over. If a counter is
+ * rolled back, it will be smaller, but within 256, which is the maximum
+ * number of events to roll back at once. If we detect a rollback,
+ * return 0. This can lead to a small lack of precision in the
+ * counters.
+ */
+ if (prev > val && (prev - val) < 256)
+ delta = 0;
+
+ return delta;
+}
+
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
+ delta = check_and_compute_delta(prev, val);
+ if (!delta)
+ return;
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
- /* The counters are only 32 bits wide */
- delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
val = (event->hw.idx == 5) ? pmc5 : pmc6;
prev = local64_read(&event->hw.prev_count);
event->hw.idx = 0;
- delta = (val - prev) & 0xfffffffful;
- local64_add(delta, &event->count);
+ delta = check_and_compute_delta(prev, val);
+ if (delta)
+ local64_add(delta, &event->count);
}
}
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
unsigned long pmc5, unsigned long pmc6)
{
struct perf_event *event;
- u64 val;
+ u64 val, prev;
int i;
for (i = 0; i < cpuhw->n_limited; ++i) {
event = cpuhw->limited_counter[i];
event->hw.idx = cpuhw->limited_hwidx[i];
val = (event->hw.idx == 5) ? pmc5 : pmc6;
- local64_set(&event->hw.prev_count, val);
+ prev = local64_read(&event->hw.prev_count);
+ if (check_and_compute_delta(prev, val))
+ local64_set(&event->hw.prev_count, val);
perf_event_update_userpage(event);
}
}
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
- delta = (val - prev) & 0xfffffffful;
+ delta = check_and_compute_delta(prev, val);
local64_add(delta, &event->count);
/*
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 375480c56eb9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
u64 stolen = 0;
u64 dtb;
+ if (!dtl)
+ return 0;
+
if (i == vpa->dtl_idx)
return 0;
while (i < vpa->dtl_idx) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a830c5e80657..bc5f0dc6ae1e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
mpic_setup_this_cpu();
}
+#ifdef CONFIG_PPC64
#ifdef CONFIG_HOTPLUG_CPU
static int smp_core99_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
static void __init smp_core99_bringup_done(void)
{
-#ifdef CONFIG_PPC64
extern void g5_phy_disable_cpu1(void);
/* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
set_cpu_present(1, false);
g5_phy_disable_cpu1();
}
-#endif /* CONFIG_PPC64 */
-
#ifdef CONFIG_HOTPLUG_CPU
register_cpu_notifier(&smp_core99_cpu_nb);
#endif
+
if (ppc_md.progress)
ppc_md.progress("smp_core99_bringup_done", 0x349);
}
+#endif /* CONFIG_PPC64 */
#ifdef CONFIG_HOTPLUG_CPU
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
struct smp_ops_t core99_smp_ops = {
.message_pass = smp_mpic_message_pass,
.probe = smp_core99_probe,
+#ifdef CONFIG_PPC64
.bringup_done = smp_core99_bringup_done,
+#endif
.kick_cpu = smp_core99_kick_cpu,
.setup_cpu = smp_core99_setup_cpu,
.give_timebase = smp_core99_give_timebase,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 000724149089..6c42cfde8415 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
int cpu, ret;
struct paca_struct *pp;
struct dtl_entry *dtl;
+ struct kmem_cache *dtl_cache;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
+ dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
+ DISPATCH_LOG_BYTES, 0, NULL);
+ if (!dtl_cache) {
+ pr_warn("Failed to create dispatch trace log buffer cache\n");
+ pr_warn("Stolen time statistics will be unreliable\n");
+ return 0;
+ }
+
for_each_possible_cpu(cpu) {
pp = &paca[cpu];
- dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL,
- cpu_to_node(cpu));
+ dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
if (!dtl) {
pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
cpu);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index f8f7f28c6343..68ca9290df94 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
struct resource rsrc;
const int *bus_range;
+ if (!of_device_is_available(dev)) {
+ pr_warning("%s: disabled\n", dev->full_name);
+ return -ENODEV;
+ }
+
pr_debug("Adding PCI host bridge %s\n", dev->full_name);
/* Fetch host bridge registers address */
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 14232d57369c..49798532b477 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1457,7 +1457,6 @@ int fsl_rio_setup(struct platform_device *dev)
port->ops = ops;
port->priv = priv;
port->phys_efptr = 0x100;
- rio_register_mport(port);
priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
rio_regs_win = priv->regs_win;
@@ -1504,6 +1503,9 @@ int fsl_rio_setup(struct platform_device *dev)
dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
port->sys_size ? 65536 : 256);
+ if (rio_register_mport(port))
+ goto err;
+
if (port->host_deviceid >= 0)
out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7061398341d5..fb317bf2c378 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -460,7 +460,7 @@ startup:
#ifndef CONFIG_MARCH_G5
# check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
- stfl __LC_STFL_FAC_LIST # store facility list
+ .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
jz 0f
la %r0,0
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
index 469f11b574fa..20530dd2eab1 100644
--- a/arch/s390/kernel/switch_cpu.S
+++ b/arch/s390/kernel/switch_cpu.S
@@ -46,7 +46,9 @@ smp_restart_cpu:
ltr %r4,%r4 /* New stack ? */
jz 1f
lr %r15,%r4
-1: basr %r14,%r2
+1: lr %r14,%r2 /* r14: Function to call */
+ lr %r2,%r3 /* r2 : Parameter for function*/
+ basr %r14,%r14 /* Call function */
.gprregs_addr:
.long .gprregs
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
index d94aacc898cb..5be3f43898f9 100644
--- a/arch/s390/kernel/switch_cpu64.S
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -42,7 +42,9 @@ smp_restart_cpu:
ltgr %r4,%r4 /* New stack ? */
jz 1f
lgr %r15,%r4
-1: basr %r14,%r2
+1: lgr %r14,%r2 /* r14: Function to call */
+ lgr %r2,%r3 /* r2 : Parameter for function*/
+ basr %r14,%r14 /* Call function */
.section .data,"aw",@progbits
.gprregs:
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 3d48f4db246d..4952872d6f0a 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -517,12 +517,8 @@ stop_exit:
static int check_hardware_prerequisites(void)
{
- unsigned long long facility_bits[2];
-
- memcpy(facility_bits, S390_lowcore.stfle_fac_list, 32);
- if (!(facility_bits[1] & (1ULL << 59)))
+ if (!test_facility(68))
return -EOPNOTSUPP;
-
return 0;
}
/*
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86
index 02fb017fed47..a9da516a5274 100644
--- a/arch/um/Kconfig.x86
+++ b/arch/um/Kconfig.x86
@@ -4,6 +4,10 @@ menu "UML-specific options"
menu "Host processor type and features"
+config CMPXCHG_LOCAL
+ bool
+ default n
+
source "arch/x86/Kconfig.cpu"
endmenu
diff --git a/arch/um/include/asm/bug.h b/arch/um/include/asm/bug.h
new file mode 100644
index 000000000000..9e33b864c359
--- /dev/null
+++ b/arch/um/include/asm/bug.h
@@ -0,0 +1,6 @@
+#ifndef __UM_BUG_H
+#define __UM_BUG_H
+
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 43085bfc99c3..156cd5d18d2a 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
* Don't enable translation but enable GART IO and CPU accesses.
* Also, set DISTLBWALKPRB since GART tables memory is UC.
*/
- ctl = DISTLBWALKPRB | order << 1;
+ ctl = order << 1;
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
- /* address of the mappings table */
- addr >>= 12;
- tmp = (u32) addr<<4;
- tmp &= ~0xf;
- pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
-
- /* Enable GART translation for this hammer. */
- pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
- ctl |= GARTEN;
- ctl &= ~(DISGARTCPU | DISGARTIO);
- pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
+ /* address of the mappings table */
+ addr >>= 12;
+ tmp = (u32) addr<<4;
+ tmp &= ~0xf;
+ pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
+
+ /* Enable GART translation for this hammer. */
+ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
+ ctl |= GARTEN | DISTLBWALKPRB;
+ ctl &= ~(DISGARTCPU | DISGARTIO);
+ pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index fd5a1f365c95..3cce71413d0b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -96,11 +96,15 @@
#define MSR_IA32_MC0_ADDR 0x00000402
#define MSR_IA32_MC0_MISC 0x00000403
+#define MSR_AMD64_MC0_MASK 0xc0010044
+
#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
+#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
+
/* These are consecutive and not in the normal 4er MCE bank block */
#define MSR_IA32_MC0_CTL2 0x00000280
#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 3d4dab43c994..a50fc9f493b3 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -51,7 +51,7 @@ static inline void numa_remove_cpu(int cpu) { }
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+void debug_cpumask_set_cpu(int cpu, int node, bool enable);
#endif
#endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 86d1ad4962a7..73fb469908c6 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -499,7 +499,7 @@ out:
* Don't enable translation yet but enable GART IO and CPU
* accesses and set DISTLBWALKPRB since GART table memory is UC.
*/
- u32 ctl = DISTLBWALKPRB | aper_order << 1;
+ u32 ctl = aper_order << 1;
bus = amd_nb_bus_dev_ranges[i].bus;
dev_base = amd_nb_bus_dev_ranges[i].dev_base;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 0b4be431c620..adee12e0da1f 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,6 +228,7 @@
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/acpi.h>
+#include <linux/syscore_ops.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -1238,6 +1239,7 @@ static int suspend(int vetoable)
local_irq_disable();
sysdev_suspend(PMSG_SUSPEND);
+ syscore_suspend();
local_irq_enable();
@@ -1255,6 +1257,7 @@ static int suspend(int vetoable)
apm_error("suspend", err);
err = (err == APM_SUCCESS) ? 0 : -EIO;
+ syscore_resume();
sysdev_resume();
local_irq_enable();
@@ -1280,6 +1283,7 @@ static void standby(void)
local_irq_disable();
sysdev_suspend(PMSG_SUSPEND);
+ syscore_suspend();
local_irq_enable();
err = set_system_power_state(APM_STATE_STANDBY);
@@ -1287,6 +1291,7 @@ static void standby(void)
apm_error("standby", err);
local_irq_disable();
+ syscore_resume();
sysdev_resume();
local_irq_enable();
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3ecece0217ef..3532d3bf8105 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
/* As a rule processors have APIC timer running in deep C states */
if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
set_cpu_cap(c, X86_FEATURE_ARAT);
+
+ /*
+ * Disable GART TLB Walk Errors on Fam10h. We do this here
+ * because this is always needed when GART is enabled, even in a
+ * kernel which has no MCE support built in.
+ */
+ if (c->x86 == 0x10) {
+ /*
+ * The BIOS should disable GartTlbWlk errors itself. If
+ * it doesn't, we do it here, as suggested by the BKDG.
+ *
+ * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
+ */
+ u64 mask;
+
+ rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
+ mask |= (1 << 10);
+ wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+ }
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index eed3673a8656..fac0654021b8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -586,8 +586,12 @@ static int x86_setup_perfctr(struct perf_event *event)
return -EOPNOTSUPP;
}
+ /*
+ * Do not allow config1 (extended registers) to propagate;
+ * there's no sane user-space generalization yet:
+ */
if (attr->type == PERF_TYPE_RAW)
- return x86_pmu_extra_regs(event->attr.config, event);
+ return 0;
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
@@ -609,8 +613,8 @@ static int x86_setup_perfctr(struct perf_event *event)
/*
* Branch tracing:
*/
- if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
- (hwc->sample_period == 1)) {
+ if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
+ !attr->freq && hwc->sample_period == 1) {
/* BTS is not supported by this architecture. */
if (!x86_pmu.bts_active)
return -EOPNOTSUPP;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8fc2b2cee1da..9ae4a2aa7398 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -25,7 +25,7 @@ struct intel_percore {
/*
* Intel PerfMon, used on Core and later.
*/
-static const u64 intel_perfmon_event_map[] =
+static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
@@ -391,12 +391,12 @@ static __initconst const u64 nehalem_hw_cache_event_ids
{
[ C(L1D) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
- [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
+ [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
+ [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
- [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
+ [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
+ [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
@@ -998,6 +998,9 @@ intel_bts_constraints(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;
+ if (event->attr.freq)
+ return NULL;
+
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
@@ -1305,7 +1308,7 @@ static void intel_clovertown_quirks(void)
* AJ106 could possibly be worked around by not allowing LBR
* usage from PEBS, including the fixup.
* AJ68 could possibly be worked around by always programming
- * a pebs_event_reset[0] value and coping with the lost events.
+ * a pebs_event_reset[0] value and coping with the lost events.
*
* But taken together it might just make sense to not enable PEBS on
* these chips.
@@ -1409,6 +1412,18 @@ static __init int intel_pmu_init(void)
x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
x86_pmu.extra_regs = intel_nehalem_extra_regs;
+
+ if (ebx & 0x40) {
+ /*
+ * Erratum AAJ80 detected, we work it around by using
+ * the BR_MISP_EXEC.ANY event. This will over-count
+ * branch-misses, but it's still much better than the
+ * architectural event which is often completely bogus:
+ */
+ intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+
+ pr_cont("erratum AAJ80 worked around, ");
+ }
pr_cont("Nehalem events, ");
break;
@@ -1425,6 +1440,7 @@ static __init int intel_pmu_init(void)
case 37: /* 32 nm nehalem, "Clarkdale" */
case 44: /* 32 nm nehalem, "Gulftown" */
+ case 47: /* 32 nm Xeon E7 */
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index c2520e178d32..d1f77e2934a1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -947,7 +947,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
if (!x86_perf_event_set_period(event))
continue;
if (perf_event_overflow(event, 1, &data, regs))
- p4_pmu_disable_event(event);
+ x86_pmu_stop(event, 0);
}
if (handled) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 82ada01625b9..b117efd24f71 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
#define AGPEXTERN
#endif
+/* GART can only remap to physical addresses < 1TB */
+#define GART_MAX_PHYS_ADDR (1ULL << 40)
+
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
size_t size, int dir, unsigned long align_mask)
{
unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
- unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
+ unsigned long iommu_page;
int i;
+ if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
+ return bad_dma_addr;
+
+ iommu_page = alloc_iommu(dev, npages, align_mask);
if (iommu_page == -1) {
if (!nonforced_iommu(dev, phys_mem, size))
return phys_mem;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 9559d360fde7..745258dfc4dc 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -213,53 +213,48 @@ int early_cpu_to_node(int cpu)
return per_cpu(x86_cpu_to_node_map, cpu);
}
-struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
+void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
- int node = early_cpu_to_node(cpu);
struct cpumask *mask;
char buf[64];
if (node == NUMA_NO_NODE) {
/* early_cpu_to_node() already emits a warning and trace */
- return NULL;
+ return;
}
mask = node_to_cpumask_map[node];
if (!mask) {
pr_err("node_to_cpumask_map[%i] NULL\n", node);
dump_stack();
- return NULL;
+ return;
}
+ if (enable)
+ cpumask_set_cpu(cpu, mask);
+ else
+ cpumask_clear_cpu(cpu, mask);
+
cpulist_scnprintf(buf, sizeof(buf), mask);
printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
enable ? "numa_add_cpu" : "numa_remove_cpu",
cpu, node, buf);
- return mask;
+ return;
}
# ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
- struct cpumask *mask;
-
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}
void __cpuinit numa_add_cpu(int cpu)
{
- numa_set_cpumask(cpu, 1);
+ numa_set_cpumask(cpu, true);
}
void __cpuinit numa_remove_cpu(int cpu)
{
- numa_set_cpumask(cpu, 0);
+ numa_set_cpumask(cpu, false);
}
# endif /* !CONFIG_NUMA_EMU */
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index ad091e4cff17..de84cc140379 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -454,10 +454,9 @@ void __cpuinit numa_remove_cpu(int cpu)
cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
}
#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
- struct cpumask *mask;
- int nid, physnid, i;
+ int nid, physnid;
nid = early_cpu_to_node(cpu);
if (nid == NUMA_NO_NODE) {
@@ -467,28 +466,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
physnid = emu_nid_to_phys[nid];
- for_each_online_node(i) {
+ for_each_online_node(nid) {
if (emu_nid_to_phys[nid] != physnid)
continue;
- mask = debug_cpumask_set_cpu(cpu, enable);
- if (!mask)
- return;
-
- if (enable)
- cpumask_set_cpu(cpu, mask);
- else
- cpumask_clear_cpu(cpu, mask);
+ debug_cpumask_set_cpu(cpu, nid, enable);
}
}
void __cpuinit numa_add_cpu(int cpu)
{
- numa_set_cpumask(cpu, 1);
+ numa_set_cpumask(cpu, true);
}
void __cpuinit numa_remove_cpu(int cpu)
{
- numa_set_cpumask(cpu, 0);
+ numa_set_cpumask(cpu, false);
}
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
index dc701ea58546..2d6d226f2b10 100644
--- a/arch/x86/platform/ce4100/falconfalls.dts
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -74,6 +74,7 @@
compatible = "intel,ce4100-pci", "pci";
device_type = "pci";
bus-range = <1 1>;
+ reg = <0x0800 0x0 0x0 0x0 0x0>;
ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
interrupt-parent = <&ioapic2>;
@@ -412,6 +413,7 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "isa";
+ reg = <0xf800 0x0 0x0 0x0 0x0>;
ranges = <1 0 0 0 0 0x100>;
rtc@70 {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 5c0207bf959b..275dbc19e2cf 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
pentry->freq_hz, pentry->irq);
if (!pentry->irq)
continue;
- mp_irq.type = MP_IOAPIC;
+ mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
mp_irq.irqflag = 5;
- mp_irq.srcbus = 0;
+ mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
totallen, (u32)pentry->phys_addr, pentry->irq);
- mp_irq.type = MP_IOAPIC;
+ mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = 0xf; /* level trigger and active low */
- mp_irq.srcbus = 0;
+ mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
mp_irq.dstirq = pentry->irq;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
+ set_bit(MP_BUS_ISA, mp_bus_not_pci);
}
/*
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 1c7121ba18ff..5cc821cb2e09 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -39,6 +39,7 @@ config XEN_MAX_DOMAIN_MEMORY
config XEN_SAVE_RESTORE
bool
depends on XEN
+ select HIBERNATE_CALLBACKS
default y
config XEN_DEBUG_FS
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 49dbd78ec3cb..e3c6a06cf725 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -238,6 +238,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
static __init void xen_init_cpuid_mask(void)
{
unsigned int ax, bx, cx, dx;
+ unsigned int xsave_mask;
cpuid_leaf1_edx_mask =
~((1 << X86_FEATURE_MCE) | /* disable MCE */
@@ -249,24 +250,16 @@ static __init void xen_init_cpuid_mask(void)
cpuid_leaf1_edx_mask &=
~((1 << X86_FEATURE_APIC) | /* disable local APIC */
(1 << X86_FEATURE_ACPI)); /* disable ACPI */
-
ax = 1;
- cx = 0;
xen_cpuid(&ax, &bx, &cx, &dx);
- /* cpuid claims we support xsave; try enabling it to see what happens */
- if (cx & (1 << (X86_FEATURE_XSAVE % 32))) {
- unsigned long cr4;
-
- set_in_cr4(X86_CR4_OSXSAVE);
-
- cr4 = read_cr4();
+ xsave_mask =
+ (1 << (X86_FEATURE_XSAVE % 32)) |
+ (1 << (X86_FEATURE_OSXSAVE % 32));
- if ((cr4 & X86_CR4_OSXSAVE) == 0)
- cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_XSAVE % 32));
-
- clear_in_cr4(X86_CR4_OSXSAVE);
- }
+ /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
+ if ((cx & xsave_mask) != xsave_mask)
+ cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
}
static void xen_set_debugreg(int reg, unsigned long val)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c82df6c9c0f0..aef7af92b28b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -565,13 +565,13 @@ pte_t xen_make_pte_debug(pteval_t pte)
if (io_page &&
(xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
- WARN(addr != other_addr,
+ WARN_ONCE(addr != other_addr,
"0x%lx is using VM_IO, but it is 0x%lx!\n",
(unsigned long)addr, (unsigned long)other_addr);
} else {
pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
other_addr = (_pte.pte & PTE_PFN_MASK);
- WARN((addr == other_addr) && (!io_page) && (!iomap_set),
+ WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
"0x%lx is missing VM_IO (and wasn't fixed)!\n",
(unsigned long)addr);
}
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
#endif
}
+#ifdef CONFIG_X86_32
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
- unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
/* If there's an existing pte, then don't allow _PAGE_RW to be set */
if (pte_val_ma(*ptep) & _PAGE_PRESENT)
pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
pte_val_ma(pte));
-#endif
+
+ return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
/*
* If the new pfn is within the range of the newly allocated
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
return pte;
}
+#endif /* CONFIG_X86_64 */
/* Init-time set_pte while constructing initial pagetables, which
doesn't allow RO pagetable pages to be remapped RW */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index fa0269a99377..90bac0aac3a5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,7 +227,7 @@ char * __init xen_memory_setup(void)
memcpy(map_raw, map, sizeof(map));
e820.nr_map = 0;
- xen_extra_mem_start = mem_end;
+ xen_extra_mem_start = max((1ULL << 32), mem_end);
for (i = 0; i < memmap.nr_entries; i++) {
unsigned long long end;
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index d77089df412e..4340ee076bd5 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -64,47 +64,41 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
int arch_show_interrupts(struct seq_file *p, int prec)
{
- int j;
-
- seq_printf(p, "%*s: ", prec, "NMI");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", nmi_count(j));
- seq_putc(p, '\n');
seq_printf(p, "%*s: ", prec, "ERR");
seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
return 0;
}
-static void xtensa_irq_mask(struct irq_chip *d)
+static void xtensa_irq_mask(struct irq_data *d)
{
cached_irq_mask &= ~(1 << d->irq);
set_sr (cached_irq_mask, INTENABLE);
}
-static void xtensa_irq_unmask(struct irq_chip *d)
+static void xtensa_irq_unmask(struct irq_data *d)
{
cached_irq_mask |= 1 << d->irq;
set_sr (cached_irq_mask, INTENABLE);
}
-static void xtensa_irq_enable(struct irq_chip *d)
+static void xtensa_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->irq);
xtensa_irq_unmask(d->irq);
}
-static void xtensa_irq_disable(struct irq_chip *d)
+static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d->irq);
variant_irq_disable(d->irq);
}
-static void xtensa_irq_ack(struct irq_chip *d)
+static void xtensa_irq_ack(struct irq_data *d)
{
set_sr(1 << d->irq, INTCLEAR);
}
-static int xtensa_irq_retrigger(struct irq_chip *d)
+static int xtensa_irq_retrigger(struct irq_data *d)
{
set_sr (1 << d->irq, INTSET);
return 1;
diff --git a/block/blk-core.c b/block/blk-core.c
index 90f22cc30799..a2e58eeb3549 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
}
EXPORT_SYMBOL(blk_dump_rq_flags);
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
- /*
- * If the current process is plugged and has barriers submitted,
- * we will livelock if we don't unplug first.
- */
- blk_flush_plug(current);
-}
-
static void blk_delay_work(struct work_struct *work)
{
struct request_queue *q;
q = container_of(work, struct request_queue, delay_work.work);
spin_lock_irq(q->queue_lock);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
}
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
*/
void blk_delay_queue(struct request_queue *q, unsigned long msecs)
{
- schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+ queue_delayed_work(kblockd_workqueue, &q->delay_work,
+ msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_delay_queue);
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -298,38 +286,42 @@ void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
cancel_delayed_work_sync(&q->delay_work);
- queue_sync_plugs(q);
}
EXPORT_SYMBOL(blk_sync_queue);
/**
* __blk_run_queue - run a single device queue
* @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
*
* Description:
* See @blk_run_queue. This variant must be called with the queue lock
* held and interrupts disabled.
- *
*/
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
- /*
- * Only recurse once to avoid overrunning the stack, let the unplug
- * handling reinvoke the handler shortly if we already got there.
- */
- if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else
- queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+ q->request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ * Tells kblockd to perform the equivalent of @blk_run_queue on our
+ * behalf.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+ if (likely(!blk_queue_stopped(q)))
+ queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+EXPORT_SYMBOL(blk_run_queue_async);
+
+/**
* blk_run_queue - run a single device queue
* @q: The queue to run
*
@@ -342,7 +334,7 @@ void blk_run_queue(struct request_queue *q)
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +983,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
blk_queue_end_tag(q, rq);
add_acct_request(q, rq, where);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1303,15 @@ get_rq:
plug = current->plug;
if (plug) {
- if (!plug->should_sort && !list_empty(&plug->list)) {
+ /*
+ * If this is the first request added after a plug, fire
+ * of a plug trace. If others have been added before, check
+ * if we have multiple devices in this plug. If so, make a
+ * note to sort the list before dispatch.
+ */
+ if (list_empty(&plug->list))
+ trace_block_plug(q);
+ else if (!plug->should_sort) {
struct request *__rq;
__rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1327,7 @@ get_rq:
} else {
spin_lock_irq(q->queue_lock);
add_acct_request(q, req, where);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
out_unlock:
spin_unlock_irq(q->queue_lock);
}
@@ -2644,6 +2644,7 @@ void blk_start_plug(struct blk_plug *plug)
plug->magic = PLUG_MAGIC;
INIT_LIST_HEAD(&plug->list);
+ INIT_LIST_HEAD(&plug->cb_list);
plug->should_sort = 0;
/*
@@ -2668,33 +2669,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
return !(rqa->q <= rqb->q);
}
-static void flush_plug_list(struct blk_plug *plug)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidentally large
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+ bool from_schedule)
+ __releases(q->queue_lock)
+{
+ trace_block_unplug(q, depth, !from_schedule);
+
+ /*
+ * If we are punting this to kblockd, then we can safely drop
+ * the queue_lock before waking kblockd (which needs to take
+ * this lock).
+ */
+ if (from_schedule) {
+ spin_unlock(q->queue_lock);
+ blk_run_queue_async(q);
+ } else {
+ __blk_run_queue(q);
+ spin_unlock(q->queue_lock);
+ }
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+ LIST_HEAD(callbacks);
+
+ if (list_empty(&plug->cb_list))
+ return;
+
+ list_splice_init(&plug->cb_list, &callbacks);
+
+ while (!list_empty(&callbacks)) {
+ struct blk_plug_cb *cb = list_first_entry(&callbacks,
+ struct blk_plug_cb,
+ list);
+ list_del(&cb->list);
+ cb->callback(cb);
+ }
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
unsigned long flags;
struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
BUG_ON(plug->magic != PLUG_MAGIC);
+ flush_plug_callbacks(plug);
if (list_empty(&plug->list))
return;
- if (plug->should_sort)
- list_sort(NULL, &plug->list, plug_rq_cmp);
+ list_splice_init(&plug->list, &list);
+
+ if (plug->should_sort) {
+ list_sort(NULL, &list, plug_rq_cmp);
+ plug->should_sort = 0;
+ }
q = NULL;
+ depth = 0;
+
+ /*
+ * Save and disable interrupts here, to avoid doing it for every
+ * queue lock we have to take.
+ */
local_irq_save(flags);
- while (!list_empty(&plug->list)) {
- rq = list_entry_rq(plug->list.next);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
BUG_ON(!rq->q);
if (rq->q != q) {
- if (q) {
- __blk_run_queue(q, false);
- spin_unlock(q->queue_lock);
- }
+ /*
+ * This drops the queue lock
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
q = rq->q;
+ depth = 0;
spin_lock(q->queue_lock);
}
rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2767,27 @@ static void flush_plug_list(struct blk_plug *plug)
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
- }
- if (q) {
- __blk_run_queue(q, false);
- spin_unlock(q->queue_lock);
+ depth++;
}
- BUG_ON(!list_empty(&plug->list));
- local_irq_restore(flags);
-}
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
- flush_plug_list(plug);
+ /*
+ * This drops the queue lock
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
- if (plug == tsk->plug)
- tsk->plug = NULL;
+ local_irq_restore(flags);
}
void blk_finish_plug(struct blk_plug *plug)
{
- if (plug)
- __blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+ blk_flush_plug_list(plug, false);
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
- __blk_finish_plug(tsk, plug);
- tsk->plug = plug;
+ if (plug == current->plug)
+ current->plug = NULL;
}
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
int __init blk_dev_init(void)
{
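For context, a minimal usage sketch of the reworked plugging interface above (not part of the patch; bio_a and bio_b are assumed to be already-prepared bios): requests queued between the two calls accumulate on plug.list and are drained by blk_flush_plug_list(), either directly or via kblockd when flushed from the scheduler path.

	struct blk_plug plug;

	blk_start_plug(&plug);		/* install the plug on current->plug */
	submit_bio(READ, bio_a);	/* requests accumulate on plug.list */
	submit_bio(READ, bio_b);
	blk_finish_plug(&plug);		/* blk_flush_plug_list(&plug, false) */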
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7482b7fa863b..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);
__elv_add_request(q, rq, where);
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
/* the queue is stopped so it won't be plugged+unplugged */
if (rq->cmd_type == REQ_TYPE_PM_RESUME)
q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index eba4a2790c6c..6c9b5e189e62 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
* request_fn may confuse the driver. Always use kblockd.
*/
if (queued)
- __blk_run_queue(q, true);
+ blk_run_queue_async(q);
}
/**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
* the comment in flush_end_io().
*/
if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
- __blk_run_queue(q, true);
+ blk_run_queue_async(q);
}
/**
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 261c75c665ae..bd236313f35d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_queue_full(q, BLK_RW_SYNC);
- } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+ } else {
blk_clear_queue_full(q, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_queue_full(q, BLK_RW_ASYNC);
- } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+ } else {
blk_clear_queue_full(q, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
{
int ret;
struct device *dev = disk_to_dev(disk);
-
struct request_queue *q = disk->queue;
if (WARN_ON(!q))
@@ -509,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
return ret;
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
- if (ret < 0)
+ if (ret < 0) {
+ blk_trace_remove_sysfs(dev);
return ret;
+ }
kobject_uevent(&q->kobj, KOBJ_ADD);
@@ -521,7 +522,7 @@ int blk_register_queue(struct gendisk *disk)
if (ret) {
kobject_uevent(&q->kobj, KOBJ_REMOVE);
kobject_del(&q->kobj);
- blk_trace_remove_sysfs(disk_to_dev(disk));
+ blk_trace_remove_sysfs(dev);
kobject_put(&dev->kobj);
return ret;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3be881ec95ad..5b52011e3a40 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
}
/*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
*/
static void
-__call_for_each_cic(struct io_context *ioc,
- void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+ void (*func)(struct io_context *, struct cfq_io_context *))
{
struct cfq_io_context *cic;
struct hlist_node *n;
+ rcu_read_lock();
+
hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
func(ioc, cic);
-}
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
- void (*func)(struct io_context *, struct cfq_io_context *))
-{
- rcu_read_lock();
- __call_for_each_cic(ioc, func);
rcu_read_unlock();
}
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
* should be ok to iterate over the known list, we will see all cic's
* since no new ones are added.
*/
- __call_for_each_cic(ioc, cic_free_func);
+ call_for_each_cic(ioc, cic_free_func);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -3368,7 +3360,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
} else {
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
@@ -3383,7 +3375,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
}
}
@@ -3743,7 +3735,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue, false);
+ __blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..45ca1e34f582 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
q->boundary_rq = rq;
}
} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
- where == ELEVATOR_INSERT_SORT)
+ (where == ELEVATOR_INSERT_SORT ||
+ where == ELEVATOR_INSERT_SORT_MERGE))
where = ELEVATOR_INSERT_BACK;
switch (where) {
@@ -695,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- __blk_run_queue(q, false);
+ __blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT_MERGE:
diff --git a/block/genhd.c b/block/genhd.c
index b364bd038a18..2dd988723d73 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
spin_unlock_irq(&ev->lock);
- /* tell userland about new events */
+ /*
+ * Tell userland about new events. Only the events listed in
+ * @disk->events are reported. Unlisted events are processed the
+ * same internally but never get reported to userland.
+ */
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & (1 << i))
+ if (events & disk->events & (1 << i))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 821040503154..7025593a58c8 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -214,7 +214,7 @@ static int amba_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int amba_pm_freeze(struct device *dev)
{
@@ -352,7 +352,7 @@ static int amba_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
@@ -363,7 +363,7 @@ static int amba_pm_restore_noirq(struct device *dev)
#define amba_pm_poweroff_noirq NULL
#define amba_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 39d829cd82dd..71afe0371311 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -150,7 +150,7 @@ static const struct ata_port_info ahci_port_info[] = {
{
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
AHCI_HFLAG_YES_NCQ),
- .flags = AHCI_FLAG_COMMON,
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
@@ -261,6 +261,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 39865009c251..12c5282e7fca 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -229,6 +229,10 @@ enum {
EM_CTL_ALHD = (1 << 26), /* Activity LED */
EM_CTL_XMT = (1 << 25), /* Transmit Only */
EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
+ EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
+ EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
+ EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
+ EM_CTL_LED = (1 << 16), /* LED messages supported */
/* em message type */
EM_MSG_TYPE_LED = (1 << 0), /* LED */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 0bc3fd6c3fdb..6f6e7718b05c 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -309,6 +309,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
/* SATA Controller IDE (PBG) */
{ 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Panther Point) */
+ { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (Panther Point) */
+ { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
+ /* SATA Controller IDE (Panther Point) */
+ { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Panther Point) */
+ { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
};
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 26d452339e98..ff9d832a163d 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -109,6 +109,8 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
static ssize_t ahci_store_em_buffer(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size);
+static ssize_t ahci_show_em_supported(struct device *dev,
+ struct device_attribute *attr, char *buf);
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -116,6 +118,7 @@ static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
+static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
@@ -126,6 +129,7 @@ struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_ahci_host_version,
&dev_attr_ahci_port_cmd,
&dev_attr_em_buffer,
+ &dev_attr_em_message_supported,
NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_attrs);
@@ -343,6 +347,24 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
return size;
}
+static ssize_t ahci_show_em_supported(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ata_port *ap = ata_shost_to_port(shost);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 em_ctl;
+
+ em_ctl = readl(mmio + HOST_EM_CTL);
+
+ return sprintf(buf, "%s%s%s%s\n",
+ em_ctl & EM_CTL_LED ? "led " : "",
+ em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
+ em_ctl & EM_CTL_SES ? "ses-2 " : "",
+ em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
+}
+
/**
* ahci_save_initial_config - Save and fixup initial config values
* @dev: target AHCI device
@@ -539,6 +561,27 @@ void ahci_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
+ u8 status;
+
+ status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+ /*
+ * At the end of section 10.1 of the AHCI spec (rev 1.3), it states that
+ * software shall not set PxCMD.ST to 1 until it is determined
+ * that a functional device is present on the port, as determined by
+ * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
+ *
+ * Even though most AHCI host controllers work without this check,
+ * some specific controllers will fail under this condition
+ */
+ if (status & (ATA_BUSY | ATA_DRQ))
+ return;
+ else {
+ ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
+
+ if ((tmp & 0xf) != 0x3)
+ return;
+ }
/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
@@ -1897,7 +1940,17 @@ static void ahci_pmp_attach(struct ata_port *ap)
ahci_enable_fbs(ap);
pp->intr_mask |= PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /*
+ * We must not change the port interrupt mask register if the
+ * port is marked frozen, the value in pp->intr_mask will be
+ * restored later when the port is thawed.
+ *
+ * Note that during initialization, the port is marked as
+ * frozen since the irq handler is not yet registered.
+ */
+ if (!(ap->pflags & ATA_PFLAG_FROZEN))
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static void ahci_pmp_detach(struct ata_port *ap)
@@ -1913,7 +1966,10 @@ static void ahci_pmp_detach(struct ata_port *ap)
writel(cmd, port_mmio + PORT_CMD);
pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
- writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
+
+ /* see comment above in ahci_pmp_attach() */
+ if (!(ap->pflags & ATA_PFLAG_FROZEN))
+ writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
int ahci_port_resume(struct ata_port *ap)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 423c0a6952b2..76c3c15cb1e6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4139,6 +4139,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER },
/* End Marker */
{ }
@@ -5480,7 +5481,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
if (!ap)
return NULL;
- ap->pflags |= ATA_PFLAG_INITIALIZING;
+ ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
ap->lock = &host->lock;
ap->print_id = -1;
ap->host = host;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 88cd22fa65cd..f26f2fe3480a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3316,6 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
+ bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM;
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
@@ -3332,7 +3333,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
*/
ata_for_each_dev(dev, link, ENABLED) {
bool hipm = ata_id_has_hipm(dev->id);
- bool dipm = ata_id_has_dipm(dev->id);
+ bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
/* find the first enabled and LPM enabled devices */
if (!link_dev)
@@ -3389,7 +3390,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
/* host config updated, enable DIPM if transitioning to MIN_POWER */
ata_for_each_dev(dev, link, ENABLED) {
- if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
+ if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
+ ata_id_has_dipm(dev->id)) {
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 0da0dcc7dd08..a5fdbdcb0faf 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -33,11 +33,12 @@
#define DRV_NAME "pata_at91"
-#define DRV_VERSION "0.1"
+#define DRV_VERSION "0.2"
#define CF_IDE_OFFSET 0x00c00000
#define CF_ALT_IDE_OFFSET 0x00e00000
#define CF_IDE_RES_SIZE 0x08
+#define NCS_RD_PULSE_LIMIT 0x3f /* maximal value for pulse bitfields */
struct at91_ide_info {
unsigned long mode;
@@ -49,8 +50,18 @@ struct at91_ide_info {
void __iomem *alt_addr;
};
-static const struct ata_timing initial_timing =
- {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0};
+static const struct ata_timing initial_timing = {
+ .mode = XFER_PIO_0,
+ .setup = 70,
+ .act8b = 290,
+ .rec8b = 240,
+ .cyc8b = 600,
+ .active = 165,
+ .recover = 150,
+ .dmack_hold = 0,
+ .cycle = 600,
+ .udma = 0
+};
static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
{
@@ -109,6 +120,11 @@ static void set_smc_timing(struct device *dev,
/* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
ncs_read_setup = 1;
ncs_read_pulse = read_cycle - 2;
+ if (ncs_read_pulse > NCS_RD_PULSE_LIMIT) {
+ ncs_read_pulse = NCS_RD_PULSE_LIMIT;
+ dev_warn(dev, "ncs_read_pulse limited to maximal value %lu\n",
+ ncs_read_pulse);
+ }
/* Write timings same as read timings */
write_cycle = read_cycle;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f051cfff18af..9e0e4fc24c46 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -149,6 +149,7 @@ static void platform_device_release(struct device *dev)
of_device_node_put(&pa->pdev.dev);
kfree(pa->pdev.dev.platform_data);
+ kfree(pa->pdev.mfd_cell);
kfree(pa->pdev.resource);
kfree(pa);
}
@@ -771,7 +772,7 @@ int __weak platform_pm_resume_noirq(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int platform_pm_freeze(struct device *dev)
{
@@ -909,7 +910,7 @@ static int platform_pm_restore_noirq(struct device *dev)
return ret;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
@@ -920,7 +921,7 @@ static int platform_pm_restore_noirq(struct device *dev)
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 052dc53eef38..fbc5b6e7c591 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -233,7 +233,7 @@ static int pm_op(struct device *dev,
}
break;
#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze) {
@@ -260,7 +260,7 @@ static int pm_op(struct device *dev,
suspend_report_result(ops->restore, error);
}
break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
default:
error = -EINVAL;
}
@@ -308,7 +308,7 @@ static int pm_noirq_op(struct device *dev,
}
break;
#endif /* CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze_noirq) {
@@ -335,7 +335,7 @@ static int pm_noirq_op(struct device *dev,
suspend_report_result(ops->restore_noirq, error);
}
break;
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
default:
error = -EINVAL;
}
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 90af2943f9e4..c126db3cb7d1 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -73,6 +73,7 @@ int syscore_suspend(void)
return ret;
}
+EXPORT_SYMBOL_GPL(syscore_suspend);
/**
* syscore_resume - Execute all the registered system core resume callbacks.
@@ -95,6 +96,7 @@ void syscore_resume(void)
"Interrupts enabled after %pF\n", ops->resume);
}
}
+EXPORT_SYMBOL_GPL(syscore_resume);
#endif /* CONFIG_PM_SLEEP */
/**
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 012cba0d6d96..b072648dc3f6 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
struct agp_memory *new;
unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+ if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+ return NULL;
+
new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
if (new == NULL)
return NULL;
@@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
int scratch_pages;
struct agp_memory *new;
size_t i;
+ int cur_memory;
if (!bridge)
return NULL;
- if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+ cur_memory = atomic_read(&bridge->current_memory_agp);
+ if ((cur_memory + page_count > bridge->max_memory_agp) ||
+ (cur_memory + page_count < page_count))
return NULL;
if (type >= AGP_USER_TYPES) {
@@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
return -EINVAL;
}
- /* AK: could wrap */
- if ((pg_start + mem->page_count) > num_entries)
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
return -EINVAL;
j = pg_start;
@@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
struct agp_bridge_data *bridge;
- int mask_type;
+ int mask_type, num_entries;
bridge = mem->bridge;
if (!bridge)
@@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
if (type != mem->type)
return -EINVAL;
+ num_entries = agp_num_entries();
+ if (((pg_start + mem->page_count) > num_entries) ||
+ ((pg_start + mem->page_count) < pg_start))
+ return -EINVAL;
+
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0) {
/* The generic routines know nothing of memory types */
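The bounds checks added above follow the usual unsigned wraparound idiom: if pg_start + mem->page_count overflows, the sum becomes smaller than pg_start, so both the upper-bound and the wraparound comparison are needed. A standalone sketch of the idiom (hypothetical helper, for illustration only, not part of the patch):

	/* True only when [start, start + count) fits below limit without
	 * wrapping around in unsigned arithmetic. */
	static bool range_fits(size_t start, size_t count, size_t limit)
	{
		if (start + count < start)	/* overflow wrapped the sum */
			return false;
		return start + count <= limit;
	}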
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 84b164d1eb2b..838568a7dbf5 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port)
spin_lock_irq(&pdrvdata_lock);
list_del(&port->cons.list);
spin_unlock_irq(&pdrvdata_lock);
-#if 0
- /*
- * hvc_remove() not called as removing one hvc port
- * results in other hvc ports getting frozen.
- *
- * Once this is resolved in hvc, this functionality
- * will be enabled. Till that is done, the -EPIPE
- * return from get_chars() above will help
- * hvc_console.c to clean up on ports we remove here.
- */
hvc_remove(port->cons.hvc);
-#endif
}
/* Remove unused data this port might have received. */
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index d77005849af8..219d88a0eeae 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
+ err = 0;
}
return err;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6b396759e7f5..8a781540590c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1448,7 +1448,7 @@ static const struct of_device_id fsldma_of_ids[] = {
{}
};
-static struct of_platform_driver fsldma_of_driver = {
+static struct platform_driver fsldma_of_driver = {
.driver = {
.name = "fsl-elo-dma",
.owner = THIS_MODULE,
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index 7f6f01a4b145..0a775f7987c2 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -116,6 +116,7 @@ static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val |= (1 << nr);
else
reg_val &= ~(1 << nr);
+ iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
mutex_unlock(&chip->lock);
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 583e92592073..7630ab7b9bec 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -558,7 +558,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
- goto out_failed;
+ goto out_failed_irq;
if (pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
@@ -570,8 +570,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
return 0;
-out_failed:
+out_failed_irq:
pca953x_irq_teardown(chip);
+out_failed:
kfree(chip->dyn_pdata);
kfree(chip);
return ret;
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 2c6af8705103..f970a5f3585e 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -105,6 +105,7 @@ static int pch_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
reg_val |= (1 << nr);
else
reg_val &= ~(1 << nr);
+ iowrite32(reg_val, &chip->reg->po);
mutex_unlock(&chip->lock);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a6feb78c404c..c58f691ec3ce 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -96,6 +96,7 @@ config DRM_I915
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 432fc04c6bff..e522c702b04e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3771,8 +3771,11 @@ static bool g4x_compute_wm0(struct drm_device *dev,
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
- if (crtc->fb == NULL || !crtc->enabled)
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *cursor_wm = cursor->guard_size;
+ *plane_wm = display->guard_size;
return false;
+ }
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
@@ -6215,36 +6218,6 @@ cleanup_work:
return ret;
}
-static void intel_crtc_reset(struct drm_crtc *crtc)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /* Reset flags back to the 'unknown' status so that they
- * will be correctly set on the initial modeset.
- */
- intel_crtc->dpms_mode = -1;
-}
-
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
- .dpms = intel_crtc_dpms,
- .mode_fixup = intel_crtc_mode_fixup,
- .mode_set = intel_crtc_mode_set,
- .mode_set_base = intel_pipe_set_base,
- .mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
- .disable = intel_crtc_disable,
-};
-
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .reset = intel_crtc_reset,
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
- .gamma_set = intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = intel_crtc_page_flip,
-};
-
static void intel_sanitize_modesetting(struct drm_device *dev,
int pipe, int plane)
{
@@ -6281,6 +6254,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
intel_disable_pipe(dev_priv, pipe);
}
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ /* Reset flags back to the 'unknown' status so that they
+ * will be correctly set on the initial modeset.
+ */
+ intel_crtc->dpms_mode = -1;
+
+ /* We need to fix up any BIOS configuration that conflicts with
+ * our expectations.
+ */
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
+}
+
+static struct drm_crtc_helper_funcs intel_helper_funcs = {
+ .dpms = intel_crtc_dpms,
+ .mode_fixup = intel_crtc_mode_fixup,
+ .mode_set = intel_crtc_mode_set,
+ .mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
+ .load_lut = intel_crtc_load_lut,
+ .disable = intel_crtc_disable,
+};
+
+static const struct drm_crtc_funcs intel_crtc_funcs = {
+ .reset = intel_crtc_reset,
+ .cursor_set = intel_crtc_cursor_set,
+ .cursor_move = intel_crtc_cursor_move,
+ .gamma_set = intel_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = intel_crtc_destroy,
+ .page_flip = intel_crtc_page_flip,
+};
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -6330,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
-
- intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4256b8ef3947..6b22c1dcc015 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1151,10 +1151,10 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
int pipeconf_reg = PIPECONF(pipe);
- int dspcntr_reg = DSPCNTR(pipe);
+ int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = DSPADDR(pipe);
+ int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
@@ -1378,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
if (type < 0)
return connector_status_disconnected;
+ intel_tv->type = type;
intel_tv_find_better_format(connector);
+
return connector_status_connected;
}
@@ -1670,8 +1672,7 @@ intel_tv_init(struct drm_device *dev)
*
* More recent chipsets favour HDMI rather than integrated S-Video.
*/
- connector->polled =
- DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_init(dev, connector, &intel_tv_connector_funcs,
DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8314a49b6b9a..90aef64b76f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -269,7 +269,7 @@ struct init_tbl_entry {
int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
};
-static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
+static int parse_init_table(struct nvbios *, uint16_t, struct init_exec *);
#define MACRO_INDEX_SIZE 2
#define MACRO_SIZE 8
@@ -2011,6 +2011,27 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
}
static int
+init_jump(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_JUMP opcode: 0x5C ('\')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): offset (in bios)
+ *
+ * Continue execution of init table from 'offset'
+ */
+
+ uint16_t jmp_offset = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Jump to 0x%04X\n", offset, jmp_offset);
+ return jmp_offset - offset;
+}
+
+static int
init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -3659,6 +3680,7 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_JUMP" , 0x5C, init_jump },
{ "INIT_I2C_IF" , 0x5E, init_i2c_if },
{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
@@ -3700,8 +3722,7 @@ static struct init_tbl_entry itbl_entry[] = {
#define MAX_TABLE_OPS 1000
static int
-parse_init_table(struct nvbios *bios, unsigned int offset,
- struct init_exec *iexec)
+parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
* Parses all commands in an init table.
@@ -6333,6 +6354,32 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* XFX GT-240X-YA
+ *
+ * So many things wrong here, replace the entire encoder table..
+ */
+ if (nv_match_device(dev, 0x0ca3, 0x1682, 0x3003)) {
+ if (idx == 0) {
+ *conn = 0x02001300; /* VGA, connector 1 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 1) {
+ *conn = 0x01010312; /* DVI, connector 0 */
+ *conf = 0x00020030;
+ } else
+ if (idx == 2) {
+ *conn = 0x01010310; /* VGA, connector 0 */
+ *conf = 0x00000028;
+ } else
+ if (idx == 3) {
+ *conn = 0x02022362; /* HDMI, connector 2 */
+ *conf = 0x00020010;
+ } else {
+ *conn = 0x0000000e; /* EOL */
+ *conf = 0x00000000;
+ }
+ }
+
return true;
}
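The INIT_JUMP handler relies on parse_init_table() advancing by each handler's return value: returning jmp_offset - offset makes the parser continue at the jump target, while returning 3 (opcode byte plus a 16-bit operand) simply steps over the instruction when execution is disabled. Narrowing the offset to uint16_t keeps that arithmetic in the 16-bit BIOS address space. A minimal illustrative sketch of the convention (not the driver code):

#include <stdint.h>

/* caller convention: offset += init_jump_sketch(tbl, offset, execute); */
static int init_jump_sketch(const uint8_t *tbl, uint16_t offset, int execute)
{
	/* opcode at tbl[offset], 16-bit little-endian target at offset + 1 */
	uint16_t target = tbl[offset + 1] | (uint16_t)(tbl[offset + 2] << 8);

	if (!execute)
		return 3;			/* skip opcode + operand */

	return (int)target - (int)offset;	/* offset += delta lands on target */
}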
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index ce38e97b9428..568caedd7216 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
&chan->m2mf_ntfy);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 57e5302503db..a76514a209b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -682,6 +682,9 @@ struct drm_nouveau_private {
/* For PFIFO and PGRAPH. */
spinlock_t context_switch_lock;
+ /* VM/PRAMIN flush, legacy PRAMIN aperture */
+ spinlock_t vm_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramfc;
@@ -1190,7 +1193,7 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
-extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern void nv84_graph_tlb_flush(struct drm_device *dev);
extern struct nouveau_enum nv50_data_error_names[];
/* nvc0_graph.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 889c4454682e..39aee6d4daf8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
OUT_RING (chan, 0);
}
- nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
+ nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
FIRE_RING(chan);
mutex_unlock(&chan->mutex);
ret = -EBUSY;
for (i = 0; i < 100000; i++) {
- if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
+ if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
ret = 0;
break;
}
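Both hunks divide the notifier offset by four because m2mf_ntfy is kept as a byte offset (it is allocated at 0xfe0 in the nouveau_dma.c hunk above) while the buffer-object accessors index 32-bit words. A small illustrative sketch of the conversion, with hypothetical helper names:

#include <stdint.h>

static uint32_t bo_rd32_sketch(const uint32_t *bo_map, unsigned int word_index)
{
	return bo_map[word_index];		/* helpers index dwords, not bytes */
}

static uint32_t read_notifier_dword(const uint32_t *bo_map,
				    unsigned int ntfy_byte_offset,
				    unsigned int dword)
{
	return bo_rd32_sketch(bo_map, ntfy_byte_offset / 4 + dword);
}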
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2683377f4131..5045f8b921d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
dma_bits = 40;
} else
if (drm_pci_device_is_pcie(dev) &&
- dev_priv->chipset != 0x40 &&
+ dev_priv->chipset > 0x40 &&
dev_priv->chipset != 0x45) {
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
dma_bits = 39;
@@ -552,6 +552,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
u8 tRC; /* Byte 9 */
u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 magic_number = 0; /* Yeah... sorry */
u8 *mem = NULL, *entry;
int i, recordlen, entries;
@@ -596,6 +597,12 @@ nouveau_mem_timing_init(struct drm_device *dev)
if (!memtimings->timing)
return;
+ /* Get "some number" from the timing reg for NV_40
+ * Used in calculations later */
+ if(dev_priv->card_type == NV_40) {
+ magic_number = (nv_rd32(dev,0x100228) & 0x0f000000) >> 24;
+ }
+
entry = mem + mem[1];
for (i = 0; i < entries; i++, entry += recordlen) {
struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
@@ -635,36 +642,51 @@ nouveau_mem_timing_init(struct drm_device *dev)
/* XXX: I don't trust the -1's and +1's... they must come
* from somewhere! */
- timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+ timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
tUNK_18 << 16 |
- (tUNK_1 + tUNK_19 + 1) << 8 |
- (tUNK_2 - 1));
+ (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
+ if(dev_priv->chipset == 0xa8) {
+ timing->reg_100224 |= (tUNK_2 - 1);
+ } else {
+ timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
+ }
timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
- if(recordlen > 19) {
- timing->reg_100228 += (tUNK_19 - 1) << 24;
- }/* I cannot back-up this else-statement right now
- else {
- timing->reg_100228 += tUNK_12 << 24;
- }*/
-
- /* XXX: reg_10022c */
- timing->reg_10022c = tUNK_2 - 1;
-
- timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
- tUNK_13 << 8 | tUNK_13);
-
- /* XXX: +6? */
- timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
- timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
-
- /* XXX; reg_100238, reg_10023c
- * reg: 0x00??????
- * reg_10023c:
- * 0 for pre-NV50 cards
- * 0x????0202 for NV50+ cards (empirical evidence) */
- if(dev_priv->card_type >= NV_50) {
+ if(dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa) {
+ timing->reg_100228 |= (tUNK_19 - 1) << 24;
+ }
+
+ if(dev_priv->card_type == NV_40) {
+ /* NV40: don't know what the rest of the regs are..
+ * And don't need to know either */
+ timing->reg_100228 |= 0x20200000 | magic_number << 24;
+ } else if(dev_priv->card_type >= NV_50) {
+ /* XXX: reg_10022c */
+ timing->reg_10022c = tUNK_2 - 1;
+
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
+
+ timing->reg_100234 = (tRAS << 24 | tRC);
+ timing->reg_100234 += max(tUNK_10,tUNK_11) << 16;
+
+ if(dev_priv->chipset < 0xa3) {
+ timing->reg_100234 |= (tUNK_2 + 2) << 8;
+ } else {
+ /* XXX: +6? */
+ timing->reg_100234 |= (tUNK_19 + 6) << 8;
+ }
+
+ /* XXX; reg_100238, reg_10023c
+ * reg_100238: 0x00??????
+ * reg_10023c: 0x!!??0202 for NV50+ cards (empirical evidence) */
timing->reg_10023c = 0x202;
+ if(dev_priv->chipset < 0xa3) {
+ timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
+ } else {
+ /* currently unknown
+ * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+ }
}
NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
@@ -675,7 +697,7 @@ nouveau_mem_timing_init(struct drm_device *dev)
timing->reg_100238, timing->reg_10023c);
}
- memtimings->nr_timing = entries;
+ memtimings->nr_timing = entries;
memtimings->supported = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 7ba3fc0b30c1..5b39718ae1f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
- uint32_t flags;
+ uint32_t flags, ttmpl;
int ret;
- if (nouveau_vram_notify)
+ if (nouveau_vram_notify) {
flags = NOUVEAU_GEM_DOMAIN_VRAM;
- else
+ ttmpl = TTM_PL_FLAG_VRAM;
+ } else {
flags = NOUVEAU_GEM_DOMAIN_GART;
+ ttmpl = TTM_PL_FLAG_TT;
+ }
ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, flags);
+ ret = nouveau_bo_pin(ntfy, ttmpl);
if (ret)
goto out_err;
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 4f00c87ed86e..67a16e01ffa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
u32 val;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return val;
}
@@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
struct drm_device *dev = gpuobj->dev;
+ unsigned long flags;
if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
u64 ptr = gpuobj->vinst + offset;
u32 base = ptr >> 16;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
if (dev_priv->ramin_base != base) {
dev_priv->ramin_base = base;
nv_wr32(dev, 0x001700, dev_priv->ramin_base);
}
nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index ac62a1b8c4fc..670e3cb697ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -134,7 +134,7 @@ nouveau_perf_init(struct drm_device *dev)
case 0x13:
case 0x15:
perflvl->fanspeed = entry[55];
- perflvl->voltage = entry[56];
+ perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
perflvl->core = ROM32(entry[1]) * 10;
perflvl->memory = ROM32(entry[5]) * 20;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a33fe4019286..4bce801bc588 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
be->func->clear(be);
return -EFAULT;
}
+ nvbe->ttm_alloced[nvbe->nr_pages] = false;
}
nvbe->nr_pages++;
@@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev)
u32 aper_size, align;
int ret;
- if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+ if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
aper_size = 512 * 1024 * 1024;
else
aper_size = 64 * 1024 * 1024;
@@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
dev_priv->gart_info.func = &nv50_sgdma_backend;
} else
if (drm_pci_device_is_pcie(dev) &&
- dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
if (nv44_graph_class(dev)) {
dev_priv->gart_info.func = &nv44_sgdma_backend;
align = 512 * 1024;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 5bb2859001e2..a30adec5beaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -376,15 +376,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
- if (dev_priv->chipset != 0x86)
+ if (dev_priv->chipset == 0x50 ||
+ dev_priv->chipset == 0xac)
engine->graph.tlb_flush = nv50_graph_tlb_flush;
- else {
- /* from what i can see nvidia do this on every
- * pre-NVA3 board except NVAC, but, we've only
- * ever seen problems on NV86
- */
- engine->graph.tlb_flush = nv86_graph_tlb_flush;
- }
+ else
+ engine->graph.tlb_flush = nv84_graph_tlb_flush;
engine->fifo.channels = 128;
engine->fifo.init = nv50_fifo_init;
engine->fifo.takedown = nv50_fifo_takedown;
@@ -612,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
spin_lock_init(&dev_priv->channels.lock);
spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
+ spin_lock_init(&dev_priv->vm_lock);
/* Make the CRTCs and I2C buses accessible */
ret = engine->display.early_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index c82db37d9f41..12098bf839c4 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -581,12 +581,13 @@ static void nv04_dfp_restore(struct drm_encoder *encoder)
int head = nv_encoder->restore.head;
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
- if (native_mode)
- call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
- native_mode->clock);
- else
- NV_ERROR(dev, "Not restoring LVDS without native mode\n");
+ struct nouveau_connector *connector =
+ nouveau_encoder_connector_get(nv_encoder);
+
+ if (connector && connector->native_mode)
+ call_lvds_script(dev, nv_encoder->dcb, head,
+ LVDS_PANEL_ON,
+ connector->native_mode->clock);
} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
int clock = nouveau_hw_pllvals_to_clk
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 2b9984027f41..a19ccaa025b3 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -469,9 +469,6 @@ nv50_crtc_wait_complete(struct drm_crtc *crtc)
start = ptimer->read(dev);
do {
- nv_wr32(dev, 0x61002c, 0x370);
- nv_wr32(dev, 0x000140, 1);
-
if (nv_ro32(disp->ntfy, 0x000))
return 0;
} while (ptimer->read(dev) - start < 2000000000ULL);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index a2cfaa691e9b..c8e83c1a4de8 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -186,6 +186,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
evo->dma.max = (4096/4) - 2;
+ evo->dma.max &= ~7;
evo->dma.put = 0;
evo->dma.cur = evo->dma.put;
evo->dma.free = evo->dma.max - evo->dma.cur;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8675b00caf18..b02a5b1e7d37 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -503,7 +503,7 @@ nv50_graph_tlb_flush(struct drm_device *dev)
}
void
-nv86_graph_tlb_flush(struct drm_device *dev)
+nv84_graph_tlb_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a6f8aa651fc6..4f95a1e5822e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -404,23 +404,25 @@ void
nv50_instmem_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 4fd3432b5b8d..6c2694490741 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -174,10 +174,11 @@ void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
- spin_lock(&dev_priv->ramin_lock);
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
- spin_unlock(&dev_priv->ramin_lock);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
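The flush paths that used ramin_lock now take the new vm_lock with the irqsave variants, which suggests the lock can also be contended from interrupt context; in that case process-context holders must disable local interrupts or the IRQ handler can deadlock on the same CPU. A minimal sketch of the pattern, assuming that motivation:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_vm_lock);

static void flush_from_process_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_vm_lock, flags);	/* also masks local IRQs */
	/* ... poke the flush registers and poll for completion ... */
	spin_unlock_irqrestore(&example_vm_lock, flags);
}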
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index 69af0ba7edd3..a179e6c55afb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,20 +104,27 @@ nvc0_vm_flush(struct nouveau_vm *vm)
struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
struct drm_device *dev = vm->dev;
struct nouveau_vm_pgd *vpgd;
- u32 r100c80, engine;
+ unsigned long flags;
+ u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
pinstmem->flush(vm->dev);
- if (vm == dev_priv->chan_vm)
- engine = 1;
- else
- engine = 5;
-
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- r100c80 = nv_rd32(dev, 0x100c80);
+ /* looks like maybe a "free flush slots" counter, the
+ * faster you write to 0x100cbc the more it decreases
+ */
+ if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+ NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
- if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
- NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+ /* wait for flush to be queued? */
+ if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+ NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
}
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 258fa5e7a2d9..7bd745689097 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -32,6 +32,7 @@
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
+#include "radeon.h"
#define ATOM_COND_ABOVE 0
#define ATOM_COND_ABOVEOREQUAL 1
@@ -101,7 +102,9 @@ static void debug_print_spaces(int n)
static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
uint32_t index, uint32_t data)
{
+ struct radeon_device *rdev = ctx->card->dev->dev_private;
uint32_t temp = 0xCDCDCDCD;
+
while (1)
switch (CU8(base)) {
case ATOM_IIO_NOP:
@@ -112,7 +115,8 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
- (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ if (rdev->family == CHIP_RV515)
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
@@ -131,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_INDEX:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((index >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -141,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_DATA:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((data >> CU8(base + 2)) &
(0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -151,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
case ATOM_IIO_MOVE_ATTR:
temp &=
~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
- CU8(base + 2));
+ CU8(base + 3));
temp |=
((ctx->
io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
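In all three IIO MOVE ops the byte at base + 1 is the field width, base + 2 reads as the source shift and base + 3 as the destination shift, so the bits cleared in temp must use the destination shift that the following OR writes to; that is what the change corrects. An illustrative sketch of the operation (names assumed, not the ATOM code):

#include <stdint.h>

static uint32_t iio_move_bits(uint32_t temp, uint32_t src, unsigned int width,
			      unsigned int src_shift, unsigned int dst_shift)
{
	uint32_t mask = 0xFFFFFFFFu >> (32 - width);

	temp &= ~(mask << dst_shift);				/* clear destination field */
	temp |= ((src >> src_shift) & mask) << dst_shift;	/* insert source field */

	return temp;
}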
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b41ec59c7100..529a3a704731 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -531,6 +531,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+
+ if (rdev->family < CHIP_RV770)
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
} else {
pll->flags |= RADEON_PLL_LEGACY;
@@ -559,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 0b0cc74c08c0..e9bc135d9189 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -120,11 +120,16 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
- if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
- if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ if (voltage->type == VOLTAGE_SW) {
+ if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
- DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
+ }
+ if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
+ radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
+ rdev->pm.current_vddci = voltage->vddci;
+ DRM_DEBUG("Setting: vddci: %d\n", voltage->vddci);
}
}
}
@@ -348,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp = 0;
+ u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -358,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* first display controller
* 0 - first half of lb (3840 * 2)
* 1 - first 3/4 of lb (5760 * 2)
- * 2 - whole lb (7680 * 2)
+ * 2 - whole lb (7680 * 2), other crtc must be disabled
* 3 - first 1/4 of lb (1920 * 2)
* second display controller
* 4 - second half of lb (3840 * 2)
* 5 - second 3/4 of lb (5760 * 2)
- * 6 - whole lb (7680 * 2)
+ * 6 - whole lb (7680 * 2), other crtc must be disabled
* 7 - last 1/4 of lb (1920 * 2)
*/
- if (mode && other_mode) {
- if (mode->hdisplay > other_mode->hdisplay) {
- if (mode->hdisplay > 2560)
- tmp = 1; /* 3/4 */
- else
- tmp = 0; /* 1/2 */
- } else if (other_mode->hdisplay > mode->hdisplay) {
- if (other_mode->hdisplay > 2560)
- tmp = 3; /* 1/4 */
- else
- tmp = 0; /* 1/2 */
- } else
+ /* this can get tricky if we have two large displays on a paired group
+ * of crtcs. Ideally for multiple large displays we'd assign them to
+ * non-linked crtcs for maximum line buffer allocation.
+ */
+ if (radeon_crtc->base.enabled && mode) {
+ if (other_mode)
tmp = 0; /* 1/2 */
- } else if (mode)
- tmp = 2; /* whole */
- else if (other_mode)
- tmp = 3; /* 1/4 */
+ else
+ tmp = 2; /* whole */
+ } else
+ tmp = 0;
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
- switch (tmp) {
- case 0:
- case 4:
- default:
- if (ASIC_IS_DCE5(rdev))
- return 4096 * 2;
- else
- return 3840 * 2;
- case 1:
- case 5:
- if (ASIC_IS_DCE5(rdev))
- return 6144 * 2;
- else
- return 5760 * 2;
- case 2:
- case 6:
- if (ASIC_IS_DCE5(rdev))
- return 8192 * 2;
- else
- return 7680 * 2;
- case 3:
- case 7:
- if (ASIC_IS_DCE5(rdev))
- return 2048 * 2;
- else
- return 1920 * 2;
+ if (radeon_crtc->base.enabled && mode) {
+ switch (tmp) {
+ case 0:
+ case 4:
+ default:
+ if (ASIC_IS_DCE5(rdev))
+ return 4096 * 2;
+ else
+ return 3840 * 2;
+ case 1:
+ case 5:
+ if (ASIC_IS_DCE5(rdev))
+ return 6144 * 2;
+ else
+ return 5760 * 2;
+ case 2:
+ case 6:
+ if (ASIC_IS_DCE5(rdev))
+ return 8192 * 2;
+ else
+ return 7680 * 2;
+ case 3:
+ case 7:
+ if (ASIC_IS_DCE5(rdev))
+ return 2048 * 2;
+ else
+ return 1920 * 2;
+ }
}
+
+ /* controller not enabled, so no lb used */
+ return 0;
}
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
@@ -2576,7 +2580,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
@@ -3036,9 +3040,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -3150,7 +3151,6 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index be271c42de4d..6f27593901c7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -587,7 +587,7 @@ void r600_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
@@ -2509,9 +2509,6 @@ int r600_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
@@ -2625,7 +2622,6 @@ void r600_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
@@ -3235,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
u32 wptr, tmp;
if (rdev->wb.enabled)
- wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+ wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 93f536594c73..ba643b576054 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -177,7 +177,7 @@ void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
@@ -767,7 +767,9 @@ struct radeon_voltage {
u8 vddci_id; /* index into vddci voltage table */
bool vddci_enabled;
/* r6xx+ sw */
- u32 voltage;
+ u16 voltage;
+ /* evergreen+ vddci */
+ u16 vddci;
};
/* clock mode flags */
@@ -835,10 +837,12 @@ struct radeon_pm {
int default_power_state_index;
u32 current_sclk;
u32 current_mclk;
- u32 current_vddc;
+ u16 current_vddc;
+ u16 current_vddci;
u32 default_sclk;
u32 default_mclk;
- u32 default_vddc;
+ u16 default_vddc;
+ u16 default_vddci;
struct radeon_i2c_chan *i2c_bus;
/* selected pm method */
enum radeon_pm_method pm_method;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index eb888ee5f674..ca576191d058 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
+ if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 99768d9d91da..f5d12fb103fa 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2176,24 +2176,27 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
}
}
-static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
u8 frev, crev;
u16 data_offset;
union firmware_info *firmware_info;
- u16 vddc = 0;
+
+ *vddc = 0;
+ *vddci = 0;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
firmware_info =
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
- vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
+ if ((frev == 2) && (crev >= 2))
+ *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
}
-
- return vddc;
}
static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
@@ -2203,7 +2206,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
- u16 vddc = radeon_atombios_get_default_vddc(rdev);
+ u16 vddc, vddci;
+
+ radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2244,6 +2249,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+ rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
/* patch the table values with the default sclk/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
@@ -2286,6 +2292,8 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->evergreen.usVDDC);
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
+ le16_to_cpu(clock_info->evergreen.usVDDCI);
} else {
sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2577,25 +2585,25 @@ union set_voltage {
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
};
-void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level)
+void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev, volt_index = level;
+ u8 frev, crev, volt_index = voltage_level;
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
switch (crev) {
case 1:
- args.v1.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v1.ucVoltageType = voltage_type;
args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
args.v1.ucVoltageIndex = volt_index;
break;
case 2:
- args.v2.ucVoltageType = SET_VOLTAGE_TYPE_ASIC_VDDC;
+ args.v2.ucVoltageType = voltage_type;
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
- args.v2.usVoltageLevel = cpu_to_le16(level);
+ args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2ef6d5135064..5f45fa12bb8b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
if (!radeon_connector->router_bus)
- goto failed;
+ DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
@@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
@@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
@@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev,
else
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
subpixel_order = SubPixelHorizontalRGB;
drm_connector_attach_property(&radeon_connector->base,
@@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
@@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
if (i2c_bus->valid) {
radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
- goto failed;
+ DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
}
drm_connector_attach_property(&radeon_connector->base,
dev->mode_config.scaling_mode_property,
@@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_legacy_backlight_init(radeon_encoder, connector);
}
}
- return;
-
-failed:
- drm_connector_cleanup(connector);
- kfree(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9e59868d354e..bbcd1dd7bac0 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -79,7 +79,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = rdev->wb.wb[scratch_index/4];
+ seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (seq != rdev->fence_drv.last_seq) {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0534ef2f331..8a955bbdb608 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -285,4 +285,6 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
rdev->gart.ttm_alloced = NULL;
+
+ radeon_dummy_page_fini(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ded2a45bc95c..983cbac75af0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1062,7 +1062,7 @@ void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
*val = in_buf[0];
DRM_DEBUG("val = 0x%02x\n", *val);
} else {
- DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+ DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
addr, *val);
}
}
@@ -1084,7 +1084,7 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
out_buf[1] = val;
if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
- DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
addr, val);
}
@@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
if (!radeon_connector->router.ddc_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
@@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
if (!radeon_connector->router.cd_valid)
return;
+ if (!radeon_connector->router_bus)
+ return;
+
radeon_i2c_get_byte(radeon_connector->router_bus,
radeon_connector->router.i2c_addr,
0x3, &val);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5b54268ed6b2..2f46e0c8df53 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,7 +269,7 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
.disable = radeon_legacy_encoder_disable,
};
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
#define MAX_RADEON_LEVEL 0xFF
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 08de669e025a..86eda1ea94df 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -23,6 +23,7 @@
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
+#include "atom.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
@@ -535,7 +536,11 @@ void radeon_pm_resume(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
+ if (rdev->pm.default_vddci)
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
+ SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
@@ -548,6 +553,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
@@ -585,7 +591,8 @@ int radeon_pm_init(struct radeon_device *rdev)
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+ radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
+ SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bbc9cd823334..c6776e48fdde 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -248,7 +248,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
void radeon_ring_free_size(struct radeon_device *rdev)
{
if (rdev->wb.enabled)
- rdev->cp.rptr = rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4];
+ rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 876cebc4b8ba..6e3b11e5abbe 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -114,7 +114,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
udelay(voltage->delay);
}
} else if (voltage->type == VOLTAGE_VDDC)
- radeon_atom_set_voltage(rdev, voltage->vddc_id);
+ radeon_atom_set_voltage(rdev, voltage->vddc_id, SET_VOLTAGE_TYPE_ASIC_VDDC);
dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b974ac7df8df..ef8a5babe9f7 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -106,7 +106,7 @@ void rv770_pm_misc(struct radeon_device *rdev)
if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
if (voltage->voltage != rdev->pm.current_vddc) {
- radeon_atom_set_voltage(rdev, voltage->voltage);
+ radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
rdev->pm.current_vddc = voltage->voltage;
DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
}
@@ -1255,9 +1255,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- r = radeon_dummy_page_init(rdev);
- if (r)
- return r;
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
@@ -1372,7 +1369,6 @@ void rv770_fini(struct radeon_device *rdev)
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
- radeon_dummy_page_fini(rdev);
}
static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 737a2a2e46a5..9d9d92945f8c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -683,22 +683,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
- if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
- void *addr;
- addr = dma_alloc_coherent(NULL, PAGE_SIZE,
- &dma_address[r],
- gfp_flags);
- if (addr == NULL)
- return -ENOMEM;
- p = virt_to_page(addr);
- } else
- p = alloc_page(gfp_flags);
+ p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
+
list_add(&p->lru, pages);
}
return 0;
@@ -746,24 +738,12 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
- unsigned r;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
- r = page_count-1;
list_for_each_entry_safe(p, tmp, pages, lru) {
- if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
- void *addr = page_address(p);
- WARN_ON(!addr || !dma_address[r]);
- if (addr)
- dma_free_coherent(NULL, PAGE_SIZE,
- addr,
- dma_address[r]);
- dma_address[r] = 0;
- } else
- __free_page(p);
- r--;
+ __free_page(p);
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 70e60a4bb678..419917955bf6 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -5,6 +5,7 @@ config STUB_POULSBO
# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_CLASS_DEVICE if ACPI
+ select VIDEO_OUTPUT_CONTROL if ACPI
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select THERMAL if ACPI
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index edfb92e41735..196ffafafd88 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -139,7 +139,6 @@ struct pmbus_data {
* A single status register covers multiple attributes,
* so we keep them all together.
*/
- u8 status_bits;
u8 status[PB_NUM_STATUS_REG];
u8 currpage;
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 38319a69bd0a..d6d58684712b 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
* Sanity check for the adapter hardware - check the reaction of
* the bus lines only if it seems to be idle.
*/
-static int test_bus(struct i2c_algo_bit_data *adap, char *name)
+static int test_bus(struct i2c_adapter *i2c_adap)
{
- int scl, sda;
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+ const char *name = i2c_adap->name;
+ int scl, sda, ret;
+
+ if (adap->pre_xfer) {
+ ret = adap->pre_xfer(i2c_adap);
+ if (ret < 0)
+ return -ENODEV;
+ }
if (adap->getscl == NULL)
pr_info("%s: Testing SDA only, SCL is not readable\n", name);
@@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name)
"while pulling SCL high!\n", name);
goto bailout;
}
+
+ if (adap->post_xfer)
+ adap->post_xfer(i2c_adap);
+
pr_info("%s: Test OK\n", name);
return 0;
bailout:
sdahi(adap);
sclhi(adap);
+
+ if (adap->post_xfer)
+ adap->post_xfer(i2c_adap);
+
return -ENODEV;
}
@@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
int ret;
if (bit_test) {
- ret = test_bus(bit_adap, adap->name);
+ ret = test_bus(adap);
if (ret < 0)
return -ENODEV;
}
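Passing the whole adapter lets the sanity test run the adapter's pre_xfer()/post_xfer() hooks, which some bit-banged buses (GPU DDC lines, for example) need in order to power up or route the pins before SDA/SCL can be sampled. A sketch of an adapter providing those hooks (hook bodies are illustrative):

#include <linux/i2c-algo-bit.h>

static int my_pre_xfer(struct i2c_adapter *adap)
{
	/* enable the bus / route the pins to this controller */
	return 0;
}

static void my_post_xfer(struct i2c_adapter *adap)
{
	/* undo whatever pre_xfer() switched on */
}

static struct i2c_algo_bit_data my_bit_data = {
	/* .setsda/.setscl/.getsda/.getscl omitted for brevity */
	.pre_xfer  = my_pre_xfer,
	.post_xfer = my_post_xfer,
	.udelay    = 10,	/* half-clock delay in microseconds */
	.timeout   = 100,	/* in jiffies */
};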
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 70c30e6bce0b..9a58994ff7ea 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
/* Let legacy drivers scan this bus for matching devices */
if (driver->attach_adapter) {
- dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+ dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
+ driver->driver.name);
dev_warn(&adap->dev, "Please use another way to instantiate "
"your i2c_client\n");
/* We ignore the return code; if it fails, too bad */
@@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
if (!driver->detach_adapter)
return 0;
- dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
+ dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
+ driver->driver.name);
res = driver->detach_adapter(adapter);
if (res)
dev_err(&adapter->dev, "detach_adapter failed (%d) "
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fd1e11799137..a5ec5a7cb381 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 2a6bc50e8a41..02caa7dd51c8 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -79,6 +79,12 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_DRIVE_NOT_READY;
}
+/*
+ * ide-cd always generates a media changed event if the media is missing,
+ * which makes it impossible to use for proper event reporting, so
+ * disk->events is cleared to 0 and the following function is used only to
+ * trigger revalidation; its result is never propagated to userland.
+ */
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
unsigned int clearing, int slot_nr)
{
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index c4ffd4888939..70ea8763567d 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -298,6 +298,12 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
return 0;
}
+ /*
+ * The following is used to force revalidation on the first open on
+ * removable devices, and never gets reported to userland as
+ * genhd->events is 0. This is intended because removable ide disks
+ * can't really detect MEDIA_CHANGE events.
+ */
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -413,7 +419,6 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 7f42d3a454d2..88d8e4cb419a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -39,13 +39,13 @@ struct evdev {
};
struct evdev_client {
- int head;
- int tail;
+ unsigned int head;
+ unsigned int tail;
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
- int bufsize;
+ unsigned int bufsize;
struct input_event buffer[];
};
@@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex);
static void evdev_pass_event(struct evdev_client *client,
struct input_event *event)
{
- /*
- * Interrupts are disabled, just acquire the lock.
- * Make sure we don't leave with the client buffer
- * "empty" by having client->head == client->tail.
- */
+ /* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
- do {
- client->buffer[client->head++] = *event;
- client->head &= client->bufsize - 1;
- } while (client->head == client->tail);
+
+ client->buffer[client->head++] = *event;
+ client->head &= client->bufsize - 1;
+
+ if (unlikely(client->head == client->tail)) {
+ /*
+ * This effectively "drops" all unconsumed events, leaving
+ * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+ */
+ client->tail = (client->head - 2) & (client->bufsize - 1);
+
+ client->buffer[client->tail].time = event->time;
+ client->buffer[client->tail].type = EV_SYN;
+ client->buffer[client->tail].code = SYN_DROPPED;
+ client->buffer[client->tail].value = 0;
+ }
+
spin_unlock(&client->buffer_lock);
if (event->type == EV_SYN)
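Instead of spinning in evdev_pass_event() until the reader catches up, an overflowing client queue now discards the backlog and leaves an EV_SYN/SYN_DROPPED marker followed by the newest event, so a reader knows its view of the device state is stale. A sketch of a plausible reader-side reaction (illustrative only):

#include <linux/input.h>

static int syn_dropped_seen;

static void handle_event(const struct input_event *ev)
{
	if (ev->type == EV_SYN && ev->code == SYN_DROPPED) {
		syn_dropped_seen = 1;	/* queue overflowed, state is stale */
		return;
	}

	if (syn_dropped_seen) {
		if (ev->type == EV_SYN && ev->code == SYN_REPORT) {
			/* resync here, e.g. via the EVIOCGKEY/EVIOCGABS ioctls */
			syn_dropped_seen = 0;
		}
		return;			/* drop events from the torn packet */
	}

	/* ... normal event processing ... */
}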
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d6e8bd8a851c..ebbceedc92f4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
}
EXPORT_SYMBOL(input_set_capability);
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+ int mt_slots;
+ int i;
+ unsigned int events;
+
+ if (dev->mtsize) {
+ mt_slots = dev->mtsize;
+ } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+ mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+ dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
+ mt_slots = clamp(mt_slots, 2, 32);
+ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+ mt_slots = 2;
+ } else {
+ mt_slots = 0;
+ }
+
+ events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+ for (i = 0; i < ABS_CNT; i++) {
+ if (test_bit(i, dev->absbit)) {
+ if (input_is_mt_axis(i))
+ events += mt_slots;
+ else
+ events++;
+ }
+ }
+
+ for (i = 0; i < REL_CNT; i++)
+ if (test_bit(i, dev->relbit))
+ events++;
+
+ return events;
+}
+
#define INPUT_CLEANSE_BITMASK(dev, type, bits) \
do { \
if (!test_bit(EV_##type, dev->evbit)) \
@@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev)
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
+ if (!dev->hint_events_per_packet)
+ dev->hint_events_per_packet =
+ input_estimate_events_per_packet(dev);
+
/*
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
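The estimate above is only a fallback; a driver that knows its packet size can still provide the hint itself before registration. A minimal sketch, assuming a hypothetical multitouch driver with 10 slots and X/Y per contact (the numbers are illustrative):

#include <linux/input.h>
#include <linux/input/mt.h>

static int example_register(struct input_dev *dev)
{
	/* hypothetical device: up to 10 contacts, X/Y per contact */
	input_mt_init_slots(dev, 10);
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 1023, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 1023, 0, 0);

	/* explicit hint; without it input_register_device() now estimates one */
	input_set_events_per_packet(dev, 10 * 2 + 1);

	return input_register_device(dev);
}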
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 09bef79d9da1..a26922cf0e84 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
static int __devinit twl4030_kp_probe(struct platform_device *pdev)
{
struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
- const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+ const struct matrix_keymap_data *keymap_data;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
int error;
- if (!pdata || !pdata->rows || !pdata->cols ||
+ if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
dev_err(&pdev->dev, "Invalid platform_data\n");
return -EINVAL;
}
+ keymap_data = pdata->keymap_data;
+
kp = kzalloc(sizeof(*kp), GFP_KERNEL);
input = input_allocate_device();
if (!kp || !input) {
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 7077f9bf5ead..62bae99424e6 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
enum xenbus_state backend_state)
{
struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
- int val;
+ int ret, val;
switch (backend_state) {
case XenbusStateInitialising:
@@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
case XenbusStateInitWait:
InitWait:
+ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "feature-abs-pointer", "%d", &val);
+ if (ret < 0)
+ val = 0;
+ if (val) {
+ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+ "request-abs-pointer", "1");
+ if (ret)
+ pr_warning("xenkbd: can't request abs-pointer");
+ }
+
xenbus_switch_state(dev, XenbusStateConnected);
break;
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index efa06882de00..45f93d0f5592 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
err = -EBUSY;
- goto fail2;
+ goto fail1;
}
if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
err = -EBUSY;
- goto fail3;
+ goto fail2;
}
serio_set_drvdata(serio, ts);
err = serio_open(serio, drv);
if (err)
- return err;
+ goto fail3;
//h3600_flite_control(1, 25); /* default brightness */
- input_register_device(ts->dev);
+ err = input_register_device(ts->dev);
+ if (err)
+ goto fail4;
return 0;
-fail3: free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4: serio_close(serio);
+fail3: serio_set_drvdata(serio, NULL);
+ free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
fail2: free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1: serio_set_drvdata(serio, NULL);
- input_free_device(input_dev);
+fail1: input_free_device(input_dev);
kfree(ts);
return err;
}
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 3790816643be..8497f56f8e46 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -178,6 +178,10 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->vcc = vcc;
+ /* handle an already-enabled regulator correctly */
+ if (regulator_is_enabled(led->vcc))
+ led->enabled = 1;
+
mutex_init(&led->mutex);
INIT_WORK(&led->work, led_work);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5ef136cdba91..e5d8904fc8f6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
return md_raid5_congested(&rs->md, bits);
}
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
- struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
- md_raid5_kick_device(rs->md.private);
-}
-
/*
* Construct a RAID4/5/6 mapping:
* Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
rs->callbacks.congested_fn = raid_is_congested;
- rs->callbacks.unplug_fn = raid_unplug;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b12b3776c0c0..7d6f7f18a920 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
/* Support for plugging.
* This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on. This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
*/
-static void plugger_work(struct work_struct *work)
-{
- struct plug_handle *plug =
- container_of(work, struct plug_handle, unplug_work);
- plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
- struct plug_handle *plug = (void *)data;
- kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
- void (*unplug_fn)(struct plug_handle *))
-{
- plug->unplug_flag = 0;
- plug->unplug_fn = unplug_fn;
- init_timer(&plug->unplug_timer);
- plug->unplug_timer.function = plugger_timeout;
- plug->unplug_timer.data = (unsigned long)plug;
- INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+ struct blk_plug_cb cb;
+ mddev_t *mddev;
+};
-void plugger_set_plug(struct plug_handle *plug)
+static void plugger_unplug(struct blk_plug_cb *cb)
{
- if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
- mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+ struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+ if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+ md_wakeup_thread(mdcb->mddev->thread);
+ kfree(mdcb);
}
-EXPORT_SYMBOL_GPL(plugger_set_plug);
-int plugger_remove_plug(struct plug_handle *plug)
+/* Check that an unplug wakeup will come shortly.
+ * If not, wake up the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
{
- if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
- del_timer(&plug->unplug_timer);
- return 1;
- } else
+ struct blk_plug *plug = current->plug;
+ struct md_plug_cb *mdcb;
+
+ if (!plug)
return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
+ list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+ if (mdcb->cb.callback == plugger_unplug &&
+ mdcb->mddev == mddev) {
+ /* Already on the list, move to top */
+ if (mdcb != list_first_entry(&plug->cb_list,
+ struct md_plug_cb,
+ cb.list))
+ list_move(&mdcb->cb.list, &plug->cb_list);
+ return 1;
+ }
+ }
+ /* Not currently on the callback list */
+ mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+ if (!mdcb)
+ return 0;
+
+ mdcb->mddev = mddev;
+ mdcb->cb.callback = plugger_unplug;
+ atomic_inc(&mddev->plug_cnt);
+ list_add(&mdcb->cb.list, &plug->cb_list);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
static inline mddev_t *mddev_get(mddev_t *mddev)
{
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
+ atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
@@ -3158,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
mddev->layout = mddev->new_layout;
mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0;
+ mddev->degraded = 0;
if (mddev->pers->sync_request == NULL) {
/* this is now an array without redundancy, so
* it must always be in_sync
@@ -4723,7 +4736,6 @@ static void md_clean(mddev_t *mddev)
mddev->bitmap_info.chunksize = 0;
mddev->bitmap_info.daemon_sleep = 0;
mddev->bitmap_info.max_write_behind = 0;
- mddev->plug = NULL;
}
static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6700,6 @@ int md_allow_write(mddev_t *mddev)
}
EXPORT_SYMBOL_GPL(md_allow_write);
-void md_unplug(mddev_t *mddev)
-{
- if (mddev->plug)
- mddev->plug->unplug_fn(mddev->plug);
-}
-
#define SYNC_MARKS 10
#define SYNC_MARK_STEP (3*HZ)
void md_do_sync(mddev_t *mddev)
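The md_plug_cb above hangs off the on-stack plug held by the submitting thread; its callback runs when that plug is flushed (blk_finish_plug() or the task scheduling away). Roughly, any caller batching bios looks like the sketch below; this is generic block-layer plug usage, not code from this patch:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Submit a batch of bios under one plug so that registered unplug
 * callbacks (such as plugger_unplug() above) fire once per flush
 * instead of once per bio.
 */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);
	blk_finish_plug(&plug);
}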
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 52b407369e13..0b1fd3f1d85b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,26 +29,6 @@
typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
- void (*unplug_fn)(struct plug_handle *);
- struct timer_list unplug_timer;
- struct work_struct unplug_work;
- unsigned long unplug_flag;
-};
-#define PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
- void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
- del_timer_sync(&plug->unplug_timer);
- cancel_work_sync(&plug->unplug_work);
-}
-
/*
* MD's 'extended' device
*/
@@ -199,6 +179,9 @@ struct mddev_s
int delta_disks, new_level, new_layout;
int new_chunk_sectors;
+ atomic_t plug_cnt; /* If device is expecting
+ * more bios soon.
+ */
struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
sector_t curr_resync; /* last block scheduled */
@@ -336,7 +319,6 @@ struct mddev_s
struct list_head all_mddevs;
struct attribute_group *to_remove;
- struct plug_handle *plug; /* if used by personality */
struct bio_set *bio_set;
@@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
@@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c2a21ae56d97..2b7a7ff401dc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf)
spin_unlock_irq(&conf->device_lock);
}
-static void md_kick_device(mddev_t *mddev)
-{
- blk_flush_plug(current);
- md_wakeup_thread(mddev->thread);
-}
-
/* Barriers....
* Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array.
@@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf)
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
/* block any new IO from starting */
conf->barrier++;
@@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
spin_unlock_irq(&conf->resync_lock);
}
@@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- md_kick_device(conf->mddev));
+ );
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf)
wait_event_lock_irq(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
- ({ flush_pending_writes(conf);
- md_kick_device(conf->mddev); }));
+ flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
@@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
mdk_rdev_t *blocked_rdev;
+ int plugged;
/*
* Register the new request and wait if the reconstruction
@@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
*/
+ plugged = mddev_check_plugged(mddev);
+
disks = conf->raid_disks;
retry_write:
blocked_rdev = NULL;
@@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync || !bitmap)
+ if (do_sync || !bitmap || !plugged)
md_wakeup_thread(mddev->thread);
return 0;
@@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev)
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
mdk_rdev_t *rdev;
+ struct blk_plug plug;
md_check_recovery(mddev);
-
+
+ blk_start_plug(&plug);
for (;;) {
char b[BDEVNAME_SIZE];
- flush_pending_writes(conf);
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev)
}
cond_resched();
}
+ blk_finish_plug(&plug);
}
@@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev)
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2da83d566592..8e9462626ec5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
spin_unlock_irq(&conf->device_lock);
}
-static void md_kick_device(mddev_t *mddev)
-{
- blk_flush_plug(current);
- md_wakeup_thread(mddev->thread);
-}
-
/* Barriers....
* Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
/* block any new IO from starting */
conf->barrier++;
- /* No wait for all pending IO to complete */
+ /* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock, md_kick_device(conf->mddev));
+ conf->resync_lock, );
spin_unlock_irq(&conf->resync_lock);
}
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- md_kick_device(conf->mddev));
+ );
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
wait_event_lock_irq(conf->wait_barrier,
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
- ({ flush_pending_writes(conf);
- md_kick_device(conf->mddev); }));
+ flush_pending_writes(conf));
+
spin_unlock_irq(&conf->resync_lock);
}
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
mdk_rdev_t *blocked_rdev;
+ int plugged;
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
*/
+ plugged = mddev_check_plugged(mddev);
+
raid10_find_phys(conf, r10_bio);
retry_write:
blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync || !mddev->bitmap)
+ if (do_sync || !mddev->bitmap || !plugged)
md_wakeup_thread(mddev->thread);
-
return 0;
}
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
mdk_rdev_t *rdev;
+ struct blk_plug plug;
md_check_recovery(mddev);
+ blk_start_plug(&plug);
for (;;) {
char b[BDEVNAME_SIZE];
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
}
cond_resched();
}
+ blk_finish_plug(&plug);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e867ee42b152..49bf5f891435 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -27,12 +27,12 @@
*
* We group bitmap updates into batches. Each batch has a number.
* We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
* new additions.
* When we discover that we will need to write to any block in a stripe
* (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
* When we are ready to do a write, if that batch hasn't been written yet,
* we plug the array and queue the stripe for later.
* When an unplug happens, we increment bm_flush, thus closing the current
@@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state)) {
+ if (test_bit(STRIPE_DELAYED, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
- plugger_set_plug(&conf->plug);
- } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
- sh->bm_seq - conf->seq_write > 0) {
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
- plugger_set_plug(&conf->plug);
- } else {
+ else {
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
}
@@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- md_raid5_kick_device(conf));
+ );
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
conf->device_lock,
- blk_flush_plug(current));
+ );
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
@@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->hold_list);
}
- } else
- plugger_set_plug(&conf->plug);
+ }
}
static void activate_bit_delay(raid5_conf_t *conf)
@@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf)
}
}
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
- blk_flush_plug(current);
- raid5_activate_delayed(conf);
- md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
- raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
- md_raid5_kick_device(conf);
-}
-
int md_raid5_congested(mddev_t *mddev, int bits)
{
raid5_conf_t *conf = mddev->private;
@@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
+ int plugged;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
+ plugged = mddev_check_plugged(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int disks, data_disks;
@@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
* add failed due to overlap. Flush everything
* and wait a while
*/
- md_raid5_kick_device(conf);
+ md_wakeup_thread(mddev->thread);
release_stripe(sh);
schedule();
goto retry;
@@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi)
}
}
+ if (!plugged)
+ md_wakeup_thread(mddev->thread);
+
spin_lock_irq(&conf->device_lock);
remaining = raid5_dec_bi_phys_segments(bi);
spin_unlock_irq(&conf->device_lock);
@@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev)
struct stripe_head *sh;
raid5_conf_t *conf = mddev->private;
int handled;
+ struct blk_plug plug;
pr_debug("+++ raid5d active\n");
md_check_recovery(mddev);
+ blk_start_plug(&plug);
handled = 0;
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
- if (conf->seq_flush != conf->seq_write) {
- int seq = conf->seq_flush;
+ if (atomic_read(&mddev->plug_cnt) == 0 &&
+ !list_empty(&conf->bitmap_list)) {
+ /* Now is a good time to flush some bitmap updates */
+ conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
bitmap_unplug(mddev->bitmap);
spin_lock_irq(&conf->device_lock);
- conf->seq_write = seq;
+ conf->seq_write = conf->seq_flush;
activate_bit_delay(conf);
}
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ raid5_activate_delayed(conf);
while ((bio = remove_bio_from_retry(conf))) {
int ok;
@@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
async_tx_issue_pending_all();
+ blk_finish_plug(&plug);
pr_debug("--- raid5d inactive\n");
}
@@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev)
mdname(mddev));
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- plugger_init(&conf->plug, raid5_unplug);
- mddev->plug = &conf->plug;
if (mddev->queue) {
int chunk_size;
/* read-ahead size must cover two whole stripes, which
@@ -5159,7 +5151,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -5192,7 +5183,6 @@ static int stop(mddev_t *mddev)
mddev->thread = NULL;
if (mddev->queue)
mddev->queue->backing_dev_info.congested_fn = NULL;
- plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
free_conf(conf);
mddev->private = NULL;
mddev->to_remove = &raid5_attrs_group;
@@ -5688,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
static void *raid45_takeover_raid0(mddev_t *mddev, int level)
{
struct raid0_private_data *raid0_priv = mddev->private;
+ sector_t sectors;
/* for raid0 takeover only one zone is supported */
if (raid0_priv->nr_strip_zones > 1) {
@@ -5696,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
return ERR_PTR(-EINVAL);
}
+ sectors = raid0_priv->strip_zone[0].zone_end;
+ sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
+ mddev->dev_sectors = sectors;
mddev->new_level = level;
mddev->new_layout = ALGORITHM_PARITY_N;
mddev->new_chunk_sectors = mddev->chunk_sectors;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8d563a4f022a..3ca77a2613ba 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -400,8 +400,6 @@ struct raid5_private_data {
* Cleared when a sync completes.
*/
- struct plug_handle plug;
-
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index c4742fc15529..c9691115f2d2 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
retval = remap_pfn_range(vma, vma->vm_start,
- PFN_DOWN(virt_to_phys(mem->vaddr)),
+ mem->dma_handle >> PAGE_SHIFT,
size, vma->vm_page_prot);
if (retval) {
dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index d01574d98870..f4c8c844b913 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -55,6 +55,19 @@ int mfd_cell_disable(struct platform_device *pdev)
}
EXPORT_SYMBOL(mfd_cell_disable);
+static int mfd_platform_add_cell(struct platform_device *pdev,
+ const struct mfd_cell *cell)
+{
+ if (!cell)
+ return 0;
+
+ pdev->mfd_cell = kmemdup(cell, sizeof(*cell), GFP_KERNEL);
+ if (!pdev->mfd_cell)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int mfd_add_device(struct device *parent, int id,
const struct mfd_cell *cell,
struct resource *mem_base,
@@ -75,7 +88,7 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.parent = parent;
- ret = platform_device_add_data(pdev, cell, sizeof(*cell));
+ ret = mfd_platform_add_cell(pdev, cell);
if (ret)
goto fail_res;
@@ -123,7 +136,6 @@ static int mfd_add_device(struct device *parent, int id,
return 0;
-/* platform_device_del(pdev); */
fail_res:
kfree(res);
fail_device:
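With the cell now duplicated into pdev->mfd_cell rather than passed as platform data, sub-device drivers are expected to fetch it through mfd_get_cell(). A minimal sketch of a hypothetical cell driver's probe (illustrative only):

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static int example_cell_probe(struct platform_device *pdev)
{
	const struct mfd_cell *cell = mfd_get_cell(pdev);

	dev_info(&pdev->dev, "probing MFD cell %s\n",
		 cell ? cell->name : "(none)");
	return 0;
}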
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 20e4e9395b61..ecafa4ba238b 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -348,15 +348,15 @@ static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
-static void gru_noop(unsigned int irq)
+static void gru_noop(struct irq_data *d)
{
}
static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
- .mask = gru_noop,
- .unmask = gru_noop,
- .ack = gru_noop
+ .irq_mask = gru_noop,
+ .irq_unmask = gru_noop,
+ .irq_ack = gru_noop
}
};
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index 237913c5c92c..fed215c4cfa1 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1452,7 +1452,7 @@ static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
oinfo = mtd->ecclayout;
if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) {
printk(KERN_ERR "%s: Not enough free bytes in OOB, "
- "%d available, %lu needed.\n",
+ "%d available, %zu needed.\n",
MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE);
return;
}
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 6fae04b3fc6d..950646aa4c4b 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -209,22 +209,8 @@ static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
int err = -EIO;
enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- if (buf >= high_memory) {
- struct page *pg;
-
- if (((size_t)buf & PAGE_MASK) !=
- ((size_t)(buf + len - 1) & PAGE_MASK)) {
- dev_warn(host->dev, "Buffer not fit in one page\n");
- goto err_buf;
- }
-
- pg = vmalloc_to_page(buf);
- if (pg == 0) {
- dev_err(host->dev, "Failed to vmalloc_to_page\n");
- goto err_buf;
- }
- p = page_address(pg) + ((size_t)buf & ~PAGE_MASK);
- }
+ if (buf >= high_memory)
+ goto err_buf;
dma_dev = host->dma_chan->device;
@@ -280,7 +266,8 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
struct nand_chip *chip = mtd->priv;
struct atmel_nand_host *host = chip->priv;
- if (use_dma && len >= mtd->oobsize)
+ if (use_dma && len > mtd->oobsize)
+ /* only use DMA for transfers larger than the OOB size: better performance */
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
@@ -295,7 +282,8 @@ static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
struct nand_chip *chip = mtd->priv;
struct atmel_nand_host *host = chip->priv;
- if (use_dma && len >= mtd->oobsize)
+ if (use_dma && len > mtd->oobsize)
+ /* only use DMA for transfers larger than the OOB size: better performance */
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
@@ -599,7 +587,10 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->options |= NAND_USE_FLASH_BBT;
}
- if (cpu_has_dma() && use_dma) {
+ if (!cpu_has_dma())
+ use_dma = 0;
+
+ if (use_dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
@@ -611,7 +602,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
}
}
if (use_dma)
- dev_info(host->dev, "Using DMA for NAND access.\n");
+ dev_info(host->dev, "Using %s for DMA transfers.\n",
+ dma_chan_name(host->dma_chan));
else
dev_info(host->dev, "No DMA support for NAND access.\n");
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f803c58b941d..66823eded7a3 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -154,7 +154,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
- u8 msix_vec_idx;
+ u8 eq_idx;
struct napi_struct napi;
};
@@ -291,7 +291,7 @@ struct be_adapter {
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
- u8 msix_vec_next_idx;
+ u8 eq_next_idx;
struct be_drv_stats drv_stats;
struct vlan_group *vlan_grp;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 9a54c8b24ff9..7cb5a114c733 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1497,7 +1497,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
- adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
/* Alloc TX eth compl queue */
@@ -1590,7 +1590,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
- rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
/* CQ */
cq = &rxo->cq;
@@ -1666,11 +1666,11 @@ static irqreturn_t be_intx(int irq, void *dev)
if (!isr)
return IRQ_NONE;
- if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+ if ((1 << adapter->tx_eq.eq_idx & isr))
event_handle(adapter, &adapter->tx_eq);
for_all_rx_queues(adapter, rxo, i) {
- if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+ if ((1 << rxo->rx_eq.eq_idx & isr))
event_handle(adapter, &rxo->rx_eq);
}
}
@@ -1951,7 +1951,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eq_obj)
{
- return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+ return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@@ -2345,6 +2345,7 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
if (be_physfn(adapter) && adapter->sriov_enabled)
for (vf = 0; vf < num_vfs; vf++)
@@ -3141,12 +3142,14 @@ static int be_resume(struct pci_dev *pdev)
static void be_shutdown(struct pci_dev *pdev)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
- if (netif_running(netdev))
+ if (!adapter)
+ return;
+
+ if (netif_running(adapter->netdev))
cancel_delayed_work_sync(&adapter->work);
- netif_device_detach(netdev);
+ netif_device_detach(adapter->netdev);
be_cmd_reset_function(adapter);
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 34933cb9569f..7581518ecfa2 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -38,6 +38,8 @@
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
* execution context (driver/bios) must match.
*/
static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
if (fwhdr.signature != drv_fwhdr->signature)
return false;
- if (fwhdr.exec != drv_fwhdr->exec)
+ if (swab32(fwhdr.param) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
enum bfi_ioc_state ioc_fwstate;
bool fwvalid;
+ u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ boot_env = BFI_BOOT_LOADER_OS;
+
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
- false : bfa_ioc_fwver_valid(ioc);
+ false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
return;
}
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}
void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
*/
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
- u32 boot_param)
+ u32 boot_env)
{
u32 *fwimg;
u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type and boot param at the end.
*/
- writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_TYPE_OFF)));
- writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_PARAM_OFF)));
+ writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_LOADER_OFF)));
}
static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
* as the entry vector.
*/
static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
void __iomem *rb;
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
- if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_param);
+ bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
* Enable interrupts just before starting LPU
@@ -2219,13 +2224,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
- u16 bdf;
-
- bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
- ioc->pcidev.device_id);
-
- pr_crit("Firmware heartbeat failure at %d", bdf);
- BUG_ON(1);
+ pr_crit("Heart Beat of IOC has failed\n");
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index e4974bc24ef6..bd48abee781f 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
bool msix);
void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_start) (struct bfa_ioc *ioc);
void (*ioc_sync_join) (struct bfa_ioc *ioc);
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 469997c4ffd1..87aecdf22cf9 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -345,6 +347,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
/**
* Synchronized IOC failure processing routines
*/
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+/**
+ * Synchronized IOC failure processing routines
+ */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index a97396811050..6050379526f7 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -184,12 +184,14 @@ enum bfi_mclass {
#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
#define BFI_BOOT_TYPE_OFF 8
-#define BFI_BOOT_PARAM_OFF 12
+#define BFI_BOOT_LOADER_OFF 12
-#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
+#define BFI_BOOT_LOADER_OS 0
+
#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 9f356d5d0f33..8e6ceab9f4d8 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index f5050155c6b5..89cb977898cb 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
for (i = 0; i < (data * 2); i++) {
if ((i % 2) == 0)
bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER, SPEED_1000);
+ LED_MODE_ON, SPEED_1000);
else
bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OFF, 0);
+ LED_MODE_FRONT_PANEL_OFF, 0);
msleep_interruptible(500);
if (signal_pending(current))
break;
}
- if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
- bp->link_vars.line_speed);
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER, bp->link_vars.line_speed);
return 0;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9bc5de3e04a8..ba715826e2a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
- tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+ tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
_unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
*/
rlb_choose_channel(skb, bond);
- /* The ARP relpy packets must be delayed so that
+ /* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 86861f08b24d..8ca7158b2dda 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -75,7 +75,7 @@ struct tlb_client_info {
* gave this entry index.
*/
u32 tx_bytes; /* Each Client accumulates the BytesTx that
- * were tranmitted to it, and after each
+ * were transmitted to it, and after each
* CallBack the LoadHistory is divided
* by the balance interval
*/
@@ -122,7 +122,6 @@ struct tlb_slave_info {
};
struct alb_bond_info {
- struct timer_list alb_timer;
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
spinlock_t tx_hashtbl_lock;
u32 unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
struct slave *next_rx_slave;/* next slave to be assigned
* to a new rx client for
*/
- u32 rlb_interval_counter;
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7513c4523ac4..330140ee266d 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+ DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c0a1bc5b1435..bd1d811c204f 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
if (!ofdev->dev.of_match)
return -EINVAL;
- data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+ data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
base = of_iomap(np, 0);
if (!base) {
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ea0dc451da9c..d70fb76edb77 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
- | NETIF_F_NETNS_LOCAL;
+ | NETIF_F_NETNS_LOCAL
+ | NETIF_F_VLAN_CHALLENGED;
dev->ethtool_ops = &loopback_ethtool_ops;
dev->header_ops = &eth_header_ops;
dev->netdev_ops = &loopback_ops;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index cfd50bc49169..62dd21b06df4 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err = mlx4_en_init_allocator(priv, ring);
if (err) {
en_err(priv, "Failed initializing ring allocator\n");
+ if (ring->stride <= TXBB_SIZE)
+ ring->buf -= TXBB_SIZE;
ring_ind--;
goto err_allocator;
}
@@ -369,6 +371,8 @@ err_buffers:
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
+ if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
ring_ind--;
}
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 62fa7eec5f0c..3814fc9b1145 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
}
for (port = 1; port <= dev->caps.num_ports; port++) {
+ enum mlx4_port_type port_type = 0;
+ mlx4_SENSE_PORT(dev, port, &port_type);
+ if (port_type)
+ dev->caps.port_type[port] = port_type;
ib_port_default_caps = 0;
err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
if (err)
@@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_mcg_table_free;
}
}
+ mlx4_set_port_mask(dev);
return 0;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index c1e0e5f1bcdb..dd7d745fbab4 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults);
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 015fbe785c13..e2337a7411d9 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -38,8 +38,8 @@
#include "mlx4.h"
-static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
- enum mlx4_port_type *type)
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type)
{
u64 out_param;
int err = 0;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index aa2813e06d00..1074231f0a0d 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
prev_eedata = eedata;
}
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
dev->base_addr = (unsigned long __force) ioaddr;
dev->irq = irq;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d7299f1a4940..679dc8519c5b 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -174,7 +174,7 @@
#define MAX_NUM_CARDS 4
-#define MAX_BUFFERS_PER_CMD 32
+#define NETXEN_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
*/
struct netxen_cmd_buffer {
struct sk_buff *skb;
- struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 83348dc4b184..e8a4b6655999 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
int i, k;
+ int delta = 0;
+ struct skb_frag_struct *frag;
u32 producer;
int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags are supported for normal packets and
+ * 32 frags for TSO packets
+ */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += frag->size;
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
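This netxen change and the qlcnic change further below apply the same pattern: for non-TSO skbs carrying more fragments than the hardware ring supports, the excess fragment bytes are pulled into the linear area so the fragment count drops under the limit. A generic sketch of that pattern, with MAX_HW_TX_FRAGS standing in for the driver-specific constant:

#include <linux/skbuff.h>

#define MAX_HW_TX_FRAGS	14	/* stand-in for the per-driver limit */

/* Returns 0 on success; on failure the caller should drop the skb.
 * On success the caller should re-read skb_shinfo(skb)->nr_frags.
 */
static int limit_tx_frags(struct sk_buff *skb)
{
	int frag_count = skb_shinfo(skb)->nr_frags + 1;
	int delta = 0, i;

	if (skb_is_gso(skb) || frag_count <= MAX_HW_TX_FRAGS)
		return 0;

	for (i = 0; i < frag_count - MAX_HW_TX_FRAGS; i++)
		delta += skb_shinfo(skb)->frags[i].size;

	/* pull the excess fragment bytes into the linear area */
	if (!__pskb_pull_tail(skb, delta))
		return -ENOMEM;

	return 0;
}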
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 693aaef4e3ce..718879b35b7d 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
lock_sock(sk);
if (po->pppoe_dev == dev &&
- sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index dc44564ef6f9..b0dead00b2d1 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -99,6 +99,7 @@
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cd88c7e1bfa9..cb1a1ef36c0a 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
struct ethhdr *phdr;
+ int delta = 0;
int i, k;
u32 producer;
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags are supported for normal packets and
+ * 32 frags for TSO packets
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_shinfo(skb)->frags[i].size;
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4d..a3c2aab53de8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
+ if (efx_dev_registered(efx) && !efx->port_inhibited)
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef1074..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
+ rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6b55a0..191a311da2dc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf3..10f1cb79c147 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return ((efx_qword_t *) (channel->eventq.addr)) + index;
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
}
/* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
if (code == EFX_CHANNEL_MAGIC_TEST(channel))
- ++channel->magic_count;
+ ; /* ignore */
else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1060,6 +1061,13 @@ out:
return spent;
}
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b647d41..a42db6e35be3 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
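The sfc hunks above drop the per-channel magic_count and stop wrapping eventq_read_ptr on every increment: the pointer now runs free and is masked with eventq_mask (presumably the ring size minus one, a power of two) only where it actually indexes the ring or is written to the RPTR register. A minimal userspace sketch of that idea follows; the names and sizes are illustrative, not the driver's.

/* Sketch only: free-running read index masked into a power-of-two ring. */
#include <stdio.h>

#define RING_SIZE 8u                    /* must be a power of two */
#define RING_MASK (RING_SIZE - 1u)

int main(void)
{
        unsigned int ring[RING_SIZE] = { 0 };
        unsigned int read_ptr = 0;      /* free-running, never wrapped */
        unsigned int i;

        for (i = 0; i < 20; i++) {
                ring[read_ptr & RING_MASK] = i;   /* wrap only on access */
                ++read_ptr;
        }

        /* Progress shows up as a change in the raw pointer value, which is
         * what the reworked self-test polls instead of a magic counter. */
        printf("read_ptr=%u slot=%u\n", read_ptr, read_ptr & RING_MASK);
        return 0;
}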
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b348d62..50ad3bcaf68a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
- struct efx_channel *channel;
-
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
efx->last_irq_cpu = -1;
smp_wmb();
- /* ACK each interrupting event queue. Receiving an interrupt due to
- * traffic before a test event is raised is considered a pass */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- if (efx->last_irq_cpu >= 0)
- goto success;
- }
-
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
struct efx_nic *efx = channel->efx;
- unsigned int magic_count, count;
+ unsigned int read_ptr, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- magic_count = channel->magic_count;
+ read_ptr = channel->eventq_read_ptr;
channel->efx->last_irq_cpu = -1;
smp_wmb();
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
do {
schedule_timeout_uninterruptible(HZ / 100);
- if (channel->work_pending)
- efx_process_channel_now(channel);
-
- if (channel->magic_count != magic_count)
+ if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
goto eventq_ok;
} while (++count < 2);
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
}
/* Check to see if event was received even if interrupt wasn't */
- efx_process_channel_now(channel);
- if (channel->magic_count != magic_count) {
+ if (efx_nic_event_present(channel)) {
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
+ netif_tx_wake_all_queues(efx->net_dev);
+
return rc_test;
}
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 139801908217..d2c85dfdf3bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
* queue state. */
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled)) {
+ likely(efx->port_enabled) &&
+ likely(!efx->port_inhibited)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cb317cd069ff..484f795a779d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
return 1;
}
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
outb(0x09 + i, 0x70);
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr.
+ * @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
}
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
/* enable packet filtering */
outl(rfcrSave | RFEN, rfcr + ioaddr);
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
outl(EEDONE, ee_addr);
return 1;
} else {
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index c498b720b532..4b42ecc63dcf 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1818,6 +1818,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
SMSC_TRACE(PROBE, "PHY will be autodetected.");
spin_lock_init(&pdata->dev_lock);
+ spin_lock_init(&pdata->mac_lock);
if (pdata->ioaddr == 0) {
SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
@@ -1895,8 +1896,11 @@ static int __devinit smsc911x_init(struct net_device *dev)
/* workaround for platforms without an eeprom, where the mac address
* is stored elsewhere and set by the bootloader. This saves the
* mac address before resetting the device */
- if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
+ if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+ spin_lock_irq(&pdata->mac_lock);
smsc911x_read_mac_address(dev);
+ spin_unlock_irq(&pdata->mac_lock);
+ }
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata))
@@ -2059,8 +2063,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
}
- spin_lock_init(&pdata->mac_lock);
-
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
SMSC_WARNING(PROBE,
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d65fab1ba790..e25093510b0c 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -26,9 +26,9 @@
#undef DWMAC_DMA_DEBUG
#ifdef DWMAC_DMA_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
#else
-#define DBG(fmt, args...) do { } while (0)
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
- DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
#ifdef DWMAC_DMA_DEBUG
/* It displays the DMA process states (CSR5 register) */
show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
#endif
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(INFO, "transmit underflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
ret = tx_hard_error_bump_tc;
x->tx_undeflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(INFO, "transmit jabber\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
x->tx_jabber_irq++;
}
if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(INFO, "recv overflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
x->rx_overflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(INFO, "receive buffer unavailable\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
x->rx_buf_unav_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(INFO, "receive process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
x->rx_process_stopped_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(INFO, "receive watchdog\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
x->rx_watchdog_irq++;
}
if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(INFO, "transmit early interrupt\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
x->tx_early_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(INFO, "transmit process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
x->tx_process_stopped_irq++;
ret = tx_hard_error;
}
if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(INFO, "fatal bus error\n");
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
x->fatal_bus_error_irq++;
ret = tx_hard_error;
}
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
- DBG(INFO, "\n\n");
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
return ret;
}
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 0e5f03135b50..cc973fc38405 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
- stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
}
@@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev)
stmmac_verify_args();
- ret = stmmac_init_phy(dev);
- if (unlikely(ret)) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
- return ret;
- }
-
- /* Request the IRQ lines */
- ret = request_irq(dev->irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
- __func__, dev->irq, ret);
- return ret;
- }
-
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev)
} else
priv->tm->enable = 1;
#endif
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ goto open_error;
+ }
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
- priv->dma_tx_phy,
- priv->dma_rx_phy) < 0)) {
-
+ ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+ priv->dma_tx_phy, priv->dma_rx_phy);
+ if (ret < 0) {
pr_err("%s: DMA initialization failed\n", __func__);
- return -1;
+ goto open_error;
}
/* Copy the MAC addr into the HW */
@@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev)
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ goto open_error;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_enable_mac(priv->ioaddr);
@@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev)
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
+
return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+ kfree(priv->tm);
+#endif
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ return ret;
}
/**
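The stmmac_open() rework above replaces scattered early returns with a single open_error label that frees the optional timer and disconnects the PHY, and defers request_irq() until the DMA and descriptor setup has succeeded, so a failure part-way through no longer leaks what was already acquired. Below is a minimal sketch of that single-exit cleanup idiom, with two stand-in resources in place of the timer and the PHY handle.

/* Sketch only: single-exit error unwinding with stand-in resources. */
#include <stdlib.h>

static int acquire(void **res) { *res = malloc(16); return *res ? 0 : -1; }

static int open_path(void)
{
        void *timer = NULL, *phy = NULL;
        int ret;

        ret = acquire(&timer);
        if (ret)
                goto open_error;

        ret = acquire(&phy);
        if (ret)
                goto open_error;

        /* ... request the IRQ last, once everything it needs exists ... */
        free(phy);              /* in the driver these stay live on success;
                                 * freed here only to keep the sketch leak-free */
        free(timer);
        return 0;

open_error:
        free(phy);              /* free() on NULL is a no-op, mirroring the
                                 * driver's kfree()/phy_disconnect() checks */
        free(timer);
        return ret;
}

int main(void) { return open_path() ? 1 : 0; }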
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 8a3b191b195b..ff32befd8443 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* The NIC has told us that a packet has been downloaded onto the card, we must
* find out which packet it has done, clear the skb and information for the packet
- * then advance around the ring for all tranmitted packets
+ * then advance around the ring for all transmitted packets
*/
static void xl_dn_comp(struct net_device *dev)
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 5bd140704533..9354ca9da576 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1675,7 +1675,7 @@ drop_frame:
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3d2fbe60b46e..2684003b8ab6 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1500,7 +1500,7 @@ drop_frame:
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 727874d9deb6..47a6c870b51f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1313,6 +1313,21 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9909),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* SMSC LAN9530 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9530),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC LAN9730 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9730),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC LAN89530 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E08),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f1b8af64569c..2d10239ce829 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
}
ret = ath9k_htc_hw_init(hif_dev->htc_handle,
- &hif_dev->udev->dev, hif_dev->device_id,
+ &interface->dev, hif_dev->device_id,
hif_dev->udev->product, id->driver_info);
if (ret) {
ret = -EINVAL;
@@ -1158,7 +1158,7 @@ fail_resume:
#endif
static struct usb_driver ath9k_hif_usb_driver = {
- .name = "ath9k_hif_usb",
+ .name = KBUILD_MODNAME,
.probe = ath9k_hif_usb_probe,
.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 338b07502f1a..c95bc5cc1a1f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ah->txchainmask = common->tx_chainmask;
ah->rxchainmask = common->rx_chainmask;
- if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
- ath9k_hw_abortpcurecv(ah);
- if (!ath9k_hw_stopdmarecv(ah)) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to stop receive dma\n");
- bChannelChange = false;
- }
- }
-
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -2546,6 +2537,7 @@ static struct {
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
+ { AR_SREV_VERSION_9485, "9485" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 562257ac52cf..edc1cbbfecaf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
#define AH_RX_TIME_QUANTUM 100 /* usec */
struct ath_common *common = ath9k_hw_common(ah);
+ u32 mac_status, last_mac_status = 0;
int i;
+ /* Enable access to the DMA observation bus */
+ REG_WRITE(ah, AR_MACMISC,
+ ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
+ (AR_MACMISC_MISC_OBS_BUS_1 <<
+ AR_MACMISC_MISC_OBS_BUS_MSB_S)));
+
REG_WRITE(ah, AR_CR, AR_CR_RXD);
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
break;
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
+ if (mac_status == 0x1c0 && mac_status == last_mac_status) {
+ *reset = true;
+ break;
+ }
+
+ last_mac_status = mac_status;
+ }
+
udelay(AH_TIME_QUANTUM);
}
if (i == 0) {
ath_err(common,
- "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
AH_RX_STOP_DMA_TIMEOUT / 1000,
REG_READ(ah, AR_CR),
- REG_READ(ah, AR_DIAG_SW));
+ REG_READ(ah, AR_DIAG_SW),
+ REG_READ(ah, AR_DMADBG_7));
return false;
} else {
return true;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b2b2ff852c32..c2a59386fb9c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_abortpcurecv(struct ath_hw *ah);
-bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
+bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
int ath9k_hw_beaconq_setup(struct ath_hw *ah);
/* Interrupt Handling */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dddb85de622d..17d04ff8d678 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ath9k_calculate_iter_data(hw, vif, &iter_data);
- ath9k_ps_wakeup(sc);
/* Set BSSID mask. */
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
@@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
}
ath9k_hw_set_interrupts(ah, ah->imask);
- ath9k_ps_restore(sc);
/* Set up ANI */
if ((iter_data.naps + iter_data.nadhocs) > 0) {
@@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_vif *avp = (void *)vif->drv_priv;
int ret = 0;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
switch (vif->type) {
@@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return ret;
}
@@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
+ ath9k_ps_wakeup(sc);
/* See if new interface type is valid. */
if ((new_type == NL80211_IFTYPE_ADHOC) &&
@@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
ath9k_do_vif_add_setup(hw, vif);
out:
+ ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
return ret;
}
@@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
sc->nvifs--;
@@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
txq = sc->tx.txq_map[queue];
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
ath_beaconq_config(sc);
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return ret;
}
@@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
int slottime;
int error;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
if (changed & BSS_CHANGED_BSSID) {
@@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a9c3f4672aa0..dcd19bc337d1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -486,12 +486,12 @@ start_recv:
bool ath_stoprecv(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
- bool stopped;
+ bool stopped, reset = false;
spin_lock_bh(&sc->rx.rxbuflock);
ath9k_hw_abortpcurecv(ah);
ath9k_hw_setrxfilter(ah, 0);
- stopped = ath9k_hw_stopdmarecv(ah);
+ stopped = ath9k_hw_stopdmarecv(ah, &reset);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_edma_stop_recv(sc);
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
"confusing the DMA engine when we start RX up\n");
ATH_DBG_WARN_ON_ONCE(!stopped);
}
- return stopped;
+ return stopped || reset;
}
void ath_flushrecv(struct ath_softc *sc)
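In the ath9k hunks above, ath9k_hw_stopdmarecv() gains a reset output: on pre-AR9300 parts it samples the DMA observation bus while waiting for RXE to clear, and if the same 0x1c0 status is read twice in a row it flags the engine as wedged instead of simply timing out; ath_stoprecv() then treats "stopped || reset" as good enough to proceed. A rough userspace model of that stuck-detection heuristic, with a simulated status register, is sketched below.

/* Sketch only: "same status twice means stuck" heuristic, simulated. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

static uint32_t read_dma_status(int iteration)
{
        return iteration < 3 ? 0x100u : 0x1c0u;   /* pretend it wedges */
}

static bool stop_rx_dma(bool *reset)
{
        uint32_t status, last_status = 0;
        int i;

        for (i = 0; i < 100; i++) {
                status = read_dma_status(i) & 0x7f0u;
                if (status == 0x1c0u && status == last_status) {
                        *reset = true;  /* stuck: let the caller reset the hw */
                        break;
                }
                last_status = status;
        }
        return i < 100;                 /* loosely: did not exhaust the timeout */
}

int main(void)
{
        bool reset = false;
        bool stopped = stop_rx_dma(&reset);

        printf("stopped=%d reset=%d usable=%d\n", stopped, reset, stopped || reset);
        return 0;
}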
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 248c670fdfbe..5c2cfe694152 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
{APL3_FCCA, CTL_FCC, CTL_FCC},
+ {APL7_FCCA, CTL_FCC, CTL_FCC},
{APL1_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_ETSIC, CTL_FCC, CTL_ETSI},
{APL2_APLD, CTL_FCC, NO_CTL},
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 3d5566e7af0a..ff0f5ba14b2c 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1536,7 +1536,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
- if (unlikely(len > ring->rx_buffersize)) {
+ if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
* This should never happen, as we try to allocate buffers
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index a01c2100f166..e8a80a1251bf 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -163,7 +163,7 @@ struct b43_dmadesc_generic {
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
#define B43_RXRING_SLOTS 64
-#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN
+#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
/* Pointer poison */
#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
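The b43 change above sizes the DMA0 RX buffer as the frame offset plus the maximum frame length, and rejects a frame only when its length plus the offset exceeds the buffer, so a maximum-sized frame no longer trips the split-buffer path. The tiny check below mirrors that arithmetic; the constants are stand-ins, not the driver's values.

/* Sketch only: buffer-fit check including the hardware frame offset. */
#include <stdio.h>
#include <stdbool.h>

#define FRAME_OFFSET   30              /* stand-in for B43_DMA0_RX_FRAMEOFFSET */
#define MAX_FRAME_LEN  2352            /* stand-in for IEEE80211_MAX_FRAME_LEN */
#define RX_BUFFERSIZE  (FRAME_OFFSET + MAX_FRAME_LEN)

static bool frame_fits(unsigned int len)
{
        return len + FRAME_OFFSET <= RX_BUFFERSIZE;
}

int main(void)
{
        printf("max frame fits: %d\n", frame_fits(MAX_FRAME_LEN));
        printf("oversized frame fits: %d\n", frame_fits(MAX_FRAME_LEN + 1));
        return 0;
}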
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index 2a45dd44cc12..aef65cd47661 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,6 +1,5 @@
config IWLWIFI_LEGACY
- tristate "Intel Wireless Wifi legacy devices"
- depends on PCI && MAC80211
+ tristate
select FW_LOADER
select NEW_LEDS
select LEDS_CLASS
@@ -65,7 +64,8 @@ endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
- depends on IWLWIFI_LEGACY
+ depends on PCI && MAC80211
+ select IWLWIFI_LEGACY
---help---
This option enables support for
@@ -92,7 +92,8 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
- depends on IWLWIFI_LEGACY
+ depends on PCI && MAC80211
+ select IWLWIFI_LEGACY
---help---
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 779d3cb86e2c..5c3a68d3af12 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -74,8 +74,6 @@
/* RSSI to dBm */
#define IWL39_RSSI_OFFSET 95
-#define IWL_DEFAULT_TX_POWER 0x0F
-
/*
* EEPROM related constants, enums, and structures.
*/
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 08b189c8472d..fc6fa2886d9c 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl {
#define IWL4965_DEFAULT_TX_RETRY 15
-/* Limit range of txpower output target to be between these values */
-#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
-
/* EEPROM */
#define IWL4965_FIRST_AMPDU_QUEUE 10
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 7007d61bb6b5..c1511b14b239 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
struct ieee80211_channel *geo_ch;
struct ieee80211_rate *rates;
int i = 0;
+ s8 max_tx_power = 0;
if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
geo_ch->flags |= ch->ht40_extension_channel;
- if (ch->max_power_avg > priv->tx_power_device_lmt)
- priv->tx_power_device_lmt = ch->max_power_avg;
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
} else {
geo_ch->flags |= IEEE80211_CHAN_DISABLED;
}
@@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
geo_ch->flags);
}
+ priv->tx_power_device_lmt = max_tx_power;
+ priv->tx_power_user_lmt = max_tx_power;
+ priv->tx_power_next = max_tx_power;
+
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & IWL_SKU_A) {
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
if (!priv->cfg->ops->lib->send_tx_power)
return -EOPNOTSUPP;
- if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) {
+ /* 0 dBm mean 1 milliwatt */
+ if (tx_power < 0) {
IWL_WARN(priv,
- "Requested user TXPOWER %d below lower limit %d.\n",
- tx_power,
- IWL4965_TX_POWER_TARGET_POWER_MIN);
+ "Requested user TXPOWER %d below 1 mW.\n",
+ tx_power);
return -EINVAL;
}
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
index 04c5648027df..cb346d1a9ffa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv)
flags & EEPROM_CHANNEL_RADAR))
? "" : "not ");
- /* Set the tx_power_user_lmt to the highest power
- * supported by any channel */
- if (eeprom_ch_info[ch].max_power_avg >
- priv->tx_power_user_lmt)
- priv->tx_power_user_lmt =
- eeprom_ch_info[ch].max_power_avg;
-
ch_info++;
}
}
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 28eb3d885ba1..cc7ebcee60e5 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
- priv->tx_power_next = IWL_DEFAULT_TX_POWER;
-
if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
eeprom->version);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 91b3d8b9d7a5..d484c3678163 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
iwl_legacy_init_scan_params(priv);
- /* Set the tx_power_user_lmt to the lowest power level
- * this value will get overwritten by channel max power avg
- * from eeprom */
- priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
- priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
-
ret = iwl_legacy_init_channel_map(priv);
if (ret) {
IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
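The iwlegacy hunks above stop seeding the TX power limits with hard-coded defaults: iwl_legacy_init_geos() now tracks the largest per-channel max_power_avg while building the geo tables and uses that single value for the device, user and next limits, and the old EEPROM-scan and init-time assignments are removed. A small sketch of deriving the limit from a channel table follows; the channel data is made up.

/* Sketch only: derive one power limit from a table of enabled channels. */
#include <stdio.h>

struct chan { int enabled; signed char max_power_avg; };

int main(void)
{
        struct chan channels[] = {
                { 1, 14 }, { 1, 17 }, { 0, 20 /* disabled, ignored */ }, { 1, 15 },
        };
        signed char max_tx_power = 0;
        unsigned int i;

        for (i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
                if (channels[i].enabled && channels[i].max_power_avg > max_tx_power)
                        max_tx_power = channels[i].max_power_avg;

        /* One value seeds the device, user and "next" limits. */
        printf("tx_power limit = %d dBm\n", max_tx_power);
        return 0;
}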
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3ea31b659d1a..22e045b5bcee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = {
struct iwl_cfg iwl5300_agn_cfg = {
.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
IWL_DEVICE_5000,
+ /* at least EEPROM 0x11A has wrong info */
+ .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
+ .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
.ht_params = &iwl5000_ht_params,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 98aa8af01192..20b66469d68f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -241,7 +241,7 @@ struct iwl_eeprom_enhanced_txpwr {
/* 6x00 Specific */
#define EEPROM_6000_TX_POWER_VERSION (4)
-#define EEPROM_6000_EEPROM_VERSION (0x434)
+#define EEPROM_6000_EEPROM_VERSION (0x423)
/* 6x50 Specific */
#define EEPROM_6050_TX_POWER_VERSION (4)
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 36952274950e..c1ceb4b23971 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -137,6 +137,7 @@ struct mwl8k_tx_queue {
struct mwl8k_priv {
struct ieee80211_hw *hw;
struct pci_dev *pdev;
+ int irq;
struct mwl8k_device_info *device_info;
@@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
IRQF_SHARED, MWL8K_NAME, hw);
if (rc) {
+ priv->irq = -1;
wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
return -EIO;
}
+ priv->irq = priv->pdev->irq;
/* Enable TX reclaim and RX tasklets. */
tasklet_enable(&priv->poll_tx_task);
@@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
+ priv->irq = -1;
tasklet_disable(&priv->poll_tx_task);
tasklet_disable(&priv->poll_rx_task);
}
@@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
/* Disable interrupts */
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- free_irq(priv->pdev->irq, hw);
+ if (priv->irq != -1) {
+ free_irq(priv->pdev->irq, hw);
+ priv->irq = -1;
+ }
/* Stop finalize join worker */
cancel_work_sync(&priv->finalize_join_worker);
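mwl8k above starts remembering the requested IRQ in priv->irq and resets it to -1 whenever the request fails or the line is freed, so mwl8k_stop() only calls free_irq() for an IRQ the driver actually holds. The sketch below shows the same "sentinel means not owned" pattern with stand-in request/free helpers.

/* Sketch only: track ownership with a -1 sentinel to avoid double free. */
#include <stdio.h>

static int fake_request_irq(int irq) { return irq > 0 ? 0 : -1; }
static void fake_free_irq(int irq)   { printf("freed irq %d\n", irq); }

struct dev_priv { int irq; };

static int dev_start(struct dev_priv *p, int hw_irq)
{
        if (fake_request_irq(hw_irq)) {
                p->irq = -1;            /* nothing to release later */
                return -1;
        }
        p->irq = hw_irq;                /* remember what we own */
        return 0;
}

static void dev_stop(struct dev_priv *p)
{
        if (p->irq != -1) {             /* only free what was requested */
                fake_free_irq(p->irq);
                p->irq = -1;
        }
}

int main(void)
{
        struct dev_priv p = { .irq = -1 };

        dev_start(&p, 42);
        dev_stop(&p);
        dev_stop(&p);                   /* second stop is now harmless */
        return 0;
}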
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 9b344a921e74..e18358725b69 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -56,6 +56,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */
{USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
{USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
+ {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */
{USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
{USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
{USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
@@ -68,6 +69,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
{USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
{USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */
+ {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */
{USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */
{USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 7834c26c2954..042842e704de 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_info *p54info;
struct p54_hdr *hdr;
struct p54_tx_data *txhdr;
- unsigned int padding, len, extra_len;
+ unsigned int padding, len, extra_len = 0;
int i, j, ridx;
u16 hdr_flags = 0, aid = 0;
u8 rate, queue = 0, crypt_offset = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9de9dbe94399..84eb6ad36377 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1062,8 +1062,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Stop all work.
*/
cancel_work_sync(&rt2x00dev->intf_work);
- cancel_work_sync(&rt2x00dev->rxdone_work);
- cancel_work_sync(&rt2x00dev->txdone_work);
+ if (rt2x00_is_usb(rt2x00dev)) {
+ cancel_work_sync(&rt2x00dev->rxdone_work);
+ cancel_work_sync(&rt2x00dev->txdone_work);
+ }
destroy_workqueue(rt2x00dev->workqueue);
/*
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index f74a8701c67d..590f14f45a89 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -685,7 +685,7 @@ static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
u8 efuse_data, word_cnts = 0;
u16 efuse_addr = 0;
- u8 hworden;
+ u8 hworden = 0;
u8 tmpdata[8];
if (data == NULL)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 5ef91374b230..28a6ce3bc239 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -303,7 +303,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
u16 box_reg, box_extreg;
u8 u1b_tmp;
bool isfw_read = false;
- u8 buf_index;
+ u8 buf_index = 0;
bool bwrite_sucess = false;
u8 wait_h2c_limmit = 100;
u8 wait_writeh2c_limmit = 100;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a4b2613d6a8c..f5d85735d642 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -246,7 +246,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv __maybe_unused *rtlpriv = rtl_priv(hw);
mutex_destroy(&rtlpriv->io.bb_mutex);
}
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5b9dbeafec06..b1c7d031c391 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -340,7 +340,7 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 18cf01719ae0..ffc745b17f4d 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -487,7 +487,7 @@ module_init(wl1271_init);
module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index e64403b6896d..6ec06a4a4c6d 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -204,7 +204,10 @@ static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
kfree(wl->nvs);
- wl->nvs = kzalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ if (len != sizeof(struct wl1271_nvs_file))
+ return -EINVAL;
+
+ wl->nvs = kzalloc(len, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
ret = -ENOMEM;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 58236e6d0921..ab607bbd6291 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -643,7 +643,7 @@ static void rx_urb_complete(struct urb *urb)
usb = urb->context;
rx = &usb->rx;
- zd_usb_reset_rx_idle_timer(usb);
+ tasklet_schedule(&rx->reset_timer_tasklet);
if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
/* If there is an old first fragment, we don't care. */
@@ -812,6 +812,7 @@ void zd_usb_disable_rx(struct zd_usb *usb)
__zd_usb_disable_rx(usb);
mutex_unlock(&rx->setup_mutex);
+ tasklet_kill(&rx->reset_timer_tasklet);
cancel_delayed_work_sync(&rx->idle_work);
}
@@ -1106,6 +1107,13 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
zd_usb_reset_rx(usb);
}
+static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+{
+ struct zd_usb *usb = (struct zd_usb *)param;
+
+ zd_usb_reset_rx_idle_timer(usb);
+}
+
void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
@@ -1127,6 +1135,7 @@ static inline void init_usb_interrupt(struct zd_usb *usb)
static inline void init_usb_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
+
spin_lock_init(&rx->lock);
mutex_init(&rx->setup_mutex);
if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
@@ -1136,11 +1145,14 @@ static inline void init_usb_rx(struct zd_usb *usb)
}
ZD_ASSERT(rx->fragment_length == 0);
INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
+ rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
+ rx->reset_timer_tasklet.data = (unsigned long)usb;
}
static inline void init_usb_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
+
spin_lock_init(&tx->lock);
atomic_set(&tx->enabled, 0);
tx->stopped = 0;
@@ -1671,6 +1683,10 @@ static void iowrite16v_urb_complete(struct urb *urb)
if (urb->status && !usb->cmd_error)
usb->cmd_error = urb->status;
+
+ if (!usb->cmd_error &&
+ urb->actual_length != urb->transfer_buffer_length)
+ usb->cmd_error = -EIO;
}
static int zd_submit_waiting_urb(struct zd_usb *usb, bool last)
@@ -1805,7 +1821,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb,
ep->desc.bInterval);
- urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK;
+ urb->transfer_flags |= URB_FREE_BUFFER;
/* Submit previous URB */
r = zd_submit_waiting_urb(usb, false);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index b3df2c8116cc..325d0f989257 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -183,6 +183,7 @@ struct zd_usb_rx {
spinlock_t lock;
struct mutex setup_mutex;
struct delayed_work idle_work;
+ struct tasklet_struct reset_timer_tasklet;
u8 fragment[2 * USB_MAX_RX_SIZE];
unsigned int fragment_length;
unsigned int usb_packet_size;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index a3755ffc03d4..bc8ce48f0778 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2550,7 +2550,6 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
const struct parport_pc_via_data *via)
{
short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
- struct resource *base_res;
u32 ite8872set;
u32 ite8872_lpt, ite8872_lpthi;
u8 ite8872_irq, type;
@@ -2561,8 +2560,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
/* make sure which one chip */
for (i = 0; i < 5; i++) {
- base_res = request_region(inta_addr[i], 32, "it887x");
- if (base_res) {
+ if (request_region(inta_addr[i], 32, "it887x")) {
int test;
pci_write_config_dword(pdev, 0x60,
0xe5000000 | inta_addr[i]);
@@ -2571,7 +2569,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
test = inb(inta_addr[i]);
if (test != 0xff)
break;
- release_region(inta_addr[i], 0x8);
+ release_region(inta_addr[i], 32);
}
}
if (i >= 5) {
@@ -2635,7 +2633,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
/*
* Release the resource so that parport_pc_probe_port can get it.
*/
- release_resource(base_res);
+ release_region(inta_addr[i], 32);
if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
printk(KERN_INFO
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c8ff646c0b05..0fa466a91bf4 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -88,4 +88,6 @@ config PCI_IOAPIC
depends on HOTPLUG
default y
-select NLS if (DMI || ACPI)
+config PCI_LABEL
+ def_bool y if (DMI || ACPI)
+ select NLS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 98d61c8e984b..c85f744270a5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -56,10 +56,10 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
# ACPI Related PCI FW Functions
# ACPI _DSM provided firmware instance and string name
#
-obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o
+obj-$(CONFIG_ACPI) += pci-acpi.o
# SMBIOS provided firmware instance and labels
-obj-$(CONFIG_DMI) += pci-label.o
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
# Cardbus & CompactPCI use setup-bus
obj-$(CONFIG_HOTPLUG) += setup-bus.o
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 505c1c7075f0..d552d2c77844 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;
-static void dmar_init_reserved_ranges(void)
+static int dmar_init_reserved_ranges(void)
{
struct pci_dev *pdev = NULL;
struct iova *iova;
@@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void)
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
IOVA_PFN(IOAPIC_RANGE_END));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve IOAPIC range failed\n");
+ return -ENODEV;
+ }
/* Reserve all PCI MMIO to avoid peer-to-peer access */
for_each_pci_dev(pdev) {
@@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void)
iova = reserve_iova(&reserved_iova_list,
IOVA_PFN(r->start),
IOVA_PFN(r->end));
- if (!iova)
+ if (!iova) {
printk(KERN_ERR "Reserve iova failed\n");
+ return -ENODEV;
+ }
}
}
-
+ return 0;
}
static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -1835,7 +1839,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
ret = iommu_attach_domain(domain, iommu);
if (ret) {
- domain_exit(domain);
+ free_domain_mem(domain);
goto error;
}
@@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
-int __init init_dmars(void)
+static int __init init_dmars(int force_on)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2397,15 @@ int __init init_dmars(void)
* enable translation
*/
for_each_drhd_unit(drhd) {
- if (drhd->ignored)
+ if (drhd->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on
+ * this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(drhd->iommu);
continue;
+ }
iommu = drhd->iommu;
iommu_flush_write_buffer(iommu);
@@ -3240,9 +3251,15 @@ static int device_notifier(struct notifier_block *nb,
if (!domain)
return 0;
- if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
+ if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
domain_remove_one_dev_info(domain, pdev);
+ if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
+ !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
+ list_empty(&domain->devices))
+ domain_exit(domain);
+ }
+
return 0;
}
@@ -3277,12 +3294,21 @@ int __init intel_iommu_init(void)
if (no_iommu || dmar_disabled)
return -ENODEV;
- iommu_init_mempool();
- dmar_init_reserved_ranges();
+ if (iommu_init_mempool()) {
+ if (force_on)
+ panic("tboot: Failed to initialize iommu memory\n");
+ return -ENODEV;
+ }
+
+ if (dmar_init_reserved_ranges()) {
+ if (force_on)
+ panic("tboot: Failed to reserve iommu ranges\n");
+ return -ENODEV;
+ }
init_no_remapping_devices();
- ret = init_dmars();
+ ret = init_dmars(force_on);
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
@@ -3391,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain->iommu_count--;
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
+
+ spin_lock_irqsave(&iommu->lock, tmp_flags);
+ clear_bit(domain->id, iommu->domain_ids);
+ iommu->domains[domain->id] = NULL;
+ spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3607,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
pte = dmar_domain->pgd;
if (dma_pte_present(pte)) {
- free_pgtable_page(dmar_domain->pgd);
dmar_domain->pgd = (struct dma_pte *)
phys_to_virt(dma_pte_addr(pte));
+ free_pgtable_page(pte);
}
dmar_domain->agaw--;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d86ea8b01137..135df164a4c1 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -781,7 +781,7 @@ static int pci_pm_resume(struct device *dev)
#endif /* !CONFIG_SUSPEND */
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int pci_pm_freeze(struct device *dev)
{
@@ -970,7 +970,7 @@ static int pci_pm_restore(struct device *dev)
return error;
}
-#else /* !CONFIG_HIBERNATION */
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
#define pci_pm_freeze_noirq NULL
@@ -981,7 +981,7 @@ static int pci_pm_restore(struct device *dev)
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
-#endif /* !CONFIG_HIBERNATION */
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 89d0a6a88df7..ebf51ad1b714 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -676,10 +676,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = align1 >> 1;
align += aligns[order];
}
- size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), align);
+ size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
size1 = !add_size ? size :
calculate_memsize(size, min_size+add_size, 0,
- resource_size(b_res), align);
+ resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 453c54c97612..4c3e94c0ae85 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -25,6 +25,8 @@
#include <mach/balloon3.h>
+#include <asm/mach-types.h>
+
#include "soc_common.h"
/*
@@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void)
{
int ret;
+ if (!machine_is_balloon3())
+ return -ENODEV;
+
balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!balloon3_pcmcia_device)
return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index b7e596620db1..b829e655457b 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
for (i = 0; i < ARRAY_SIZE(irqs); i++) {
if (irqs[i].sock != skt->nr)
continue;
- if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 0) {
+ if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) {
pr_err("%s: sock %d unable to request gpio %d\n",
- __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq));
+ __func__, skt->nr, irq_to_gpio(irqs[i].irq));
ret = -EBUSY;
goto error;
}
- if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) {
+ if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) {
pr_err("%s: sock %d unable to set input gpio %d\n",
- __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq));
+ __func__, skt->nr, irq_to_gpio(irqs[i].irq));
ret = -EINVAL;
goto error;
}
@@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
error:
for (; i >= 0; i--) {
- gpio_free(IRQ_TO_GPIO(irqs[i].irq));
+ gpio_free(irq_to_gpio(irqs[i].irq));
}
return (ret);
}
@@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
/* free allocated gpio's */
gpio_free(GPIO_PRDY);
for (i = 0; i < ARRAY_SIZE(irqs); i++)
- gpio_free(IRQ_TO_GPIO(irqs[i].irq));
+ gpio_free(irq_to_gpio(irqs[i].irq));
}
static unsigned long trizeps_pcmcia_status[2];
@@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void)
{
int ret;
+ if (!machine_is_trizeps4() && !machine_is_trizeps4wl())
+ return -ENODEV;
+
trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!trizeps_pcmcia_device)
return -ENOMEM;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2ee442c2a5db..0485e394712a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -187,7 +187,8 @@ config MSI_LAPTOP
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
depends on RFKILL
- depends on SERIO_I8042
+ depends on INPUT && SERIO_I8042
+ select INPUT_SPARSEKMAP
---help---
This is a driver for laptops built by MSI (MICRO-STAR
INTERNATIONAL):
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 5ea6c3477d17..ac4e7f83ce6c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -89,7 +89,7 @@ MODULE_LICENSE("GPL");
#define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026"
MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
-MODULE_ALIAS("wmi:6AF4F258-B401-42Fd-BE91-3D4AC2D7C0D3");
+MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3");
MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
enum acer_wmi_event_ids {
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index efc776cb0c66..832a3fd7c1c8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -201,8 +201,8 @@ static int asus_wmi_input_init(struct asus_wmi *asus)
if (!asus->inputdev)
return -ENOMEM;
- asus->inputdev->name = asus->driver->input_phys;
- asus->inputdev->phys = asus->driver->input_name;
+ asus->inputdev->name = asus->driver->input_name;
+ asus->inputdev->phys = asus->driver->input_phys;
asus->inputdev->id.bustype = BUS_HOST;
asus->inputdev->dev.parent = &asus->platform_device->dev;
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 0ddc434fb93b..649dcadd8ea3 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -67,9 +67,11 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_KEY, 0x82, { KEY_CAMERA } },
{ KE_KEY, 0x83, { KEY_CAMERA_ZOOMIN } },
{ KE_KEY, 0x88, { KEY_WLAN } },
+ { KE_KEY, 0xbd, { KEY_CAMERA } },
{ KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */
{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
+ { KE_KEY, 0xe8, { KEY_SCREENLOCK } },
{ KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
{ KE_KEY, 0xec, { KEY_CAMERA_UP } },
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index d653104b59cb..464bb3fc4d88 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -74,6 +74,19 @@ struct pmic_gpio {
u32 trigger_type;
};
+static void pmic_program_irqtype(int gpio, int type)
+{
+ if (type & IRQ_TYPE_EDGE_RISING)
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
+ else
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
+ else
+ intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
+};
+
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
@@ -166,16 +179,38 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
return pg->irq_base + offset;
}
+static void pmic_bus_lock(struct irq_data *data)
+{
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&pg->buslock);
+}
+
+static void pmic_bus_sync_unlock(struct irq_data *data)
+{
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+
+ if (pg->update_type) {
+ unsigned int gpio = pg->update_type & ~GPIO_UPDATE_TYPE;
+
+ pmic_program_irqtype(gpio, pg->trigger_type);
+ pg->update_type = 0;
+ }
+ mutex_unlock(&pg->buslock);
+}
+
/* the gpiointr register is read-clear, so just do nothing. */
static void pmic_irq_unmask(struct irq_data *data) { }
static void pmic_irq_mask(struct irq_data *data) { }
static struct irq_chip pmic_irqchip = {
- .name = "PMIC-GPIO",
- .irq_mask = pmic_irq_mask,
- .irq_unmask = pmic_irq_unmask,
- .irq_set_type = pmic_irq_type,
+ .name = "PMIC-GPIO",
+ .irq_mask = pmic_irq_mask,
+ .irq_unmask = pmic_irq_unmask,
+ .irq_set_type = pmic_irq_type,
+ .irq_bus_lock = pmic_bus_lock,
+ .irq_bus_sync_unlock = pmic_bus_sync_unlock,
};
static irqreturn_t pmic_irq_handler(int irq, void *data)
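The intel_pmic_gpio change above adds irq_bus_lock/irq_bus_sync_unlock callbacks: the pending trigger type is cached in update_type/trigger_type, and the slow SCU IPC register writes are issued from pmic_bus_sync_unlock(), i.e. once the bus lock is released in sleepable context rather than on the irq_set_type path. Below is a minimal sketch of that cache-then-flush-on-unlock pattern; the names are made up for the example.

/* Sketch only: cache a setting while "locked", push it to slow hardware
 * on unlock. */
#include <stdio.h>
#include <stdbool.h>

struct fake_chip {
        bool pending;
        int cached_type;
};

static void slow_hw_write(int type)
{
        printf("programming trigger type %d\n", type);  /* stands in for IPC */
}

static void bus_lock(struct fake_chip *c)   { (void)c; /* take the mutex */ }

static void set_type(struct fake_chip *c, int type)
{
        c->cached_type = type;          /* cheap: just remember the request */
        c->pending = true;
}

static void bus_sync_unlock(struct fake_chip *c)
{
        if (c->pending) {
                slow_hw_write(c->cached_type);  /* slow work, sleepable context */
                c->pending = false;
        }
        /* release the mutex */
}

int main(void)
{
        struct fake_chip c = { 0 };

        bus_lock(&c);
        set_type(&c, 3);
        bus_sync_unlock(&c);
        return 0;
}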
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index de434c6dc2d6..d347116d150e 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -571,6 +571,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
+ .ident = "R410 Plus",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
+ DMI_MATCH(DMI_BOARD_NAME, "R460"),
+ },
+ .callback = dmi_check_cb,
+ },
+ {
.ident = "R518",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
@@ -591,12 +601,12 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
.callback = dmi_check_cb,
},
{
- .ident = "N150/N210/N220",
+ .ident = "N150/N210/N220/N230",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR,
"SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"),
- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
+ DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
},
.callback = dmi_check_cb,
},
@@ -771,6 +781,7 @@ static int __init samsung_init(void)
/* create a backlight device to talk to this one */
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = sabi_config->max_brightness;
backlight_device = backlight_device_register("samsung", &sdev->dev,
NULL, &backlight_ops,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e642f5f29504..8f709aec4da0 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -138,6 +138,8 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
"(default: 0)");
+static void sony_nc_kbd_backlight_resume(void);
+
enum sony_nc_rfkill {
SONY_WIFI,
SONY_BLUETOOTH,
@@ -771,11 +773,6 @@ static int sony_nc_handles_setup(struct platform_device *pd)
if (!handles)
return -ENOMEM;
- sysfs_attr_init(&handles->devattr.attr);
- handles->devattr.attr.name = "handles";
- handles->devattr.attr.mode = S_IRUGO;
- handles->devattr.show = sony_nc_handles_show;
-
for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
if (!acpi_callsetfunc(sony_nc_acpi_handle,
"SN00", i + 0x20, &result)) {
@@ -785,11 +782,18 @@ static int sony_nc_handles_setup(struct platform_device *pd)
}
}
- /* allow reading capabilities via sysfs */
- if (device_create_file(&pd->dev, &handles->devattr)) {
- kfree(handles);
- handles = NULL;
- return -1;
+ if (debug) {
+ sysfs_attr_init(&handles->devattr.attr);
+ handles->devattr.attr.name = "handles";
+ handles->devattr.attr.mode = S_IRUGO;
+ handles->devattr.show = sony_nc_handles_show;
+
+ /* allow reading capabilities via sysfs */
+ if (device_create_file(&pd->dev, &handles->devattr)) {
+ kfree(handles);
+ handles = NULL;
+ return -1;
+ }
}
return 0;
@@ -798,7 +802,8 @@ static int sony_nc_handles_setup(struct platform_device *pd)
static int sony_nc_handles_cleanup(struct platform_device *pd)
{
if (handles) {
- device_remove_file(&pd->dev, &handles->devattr);
+ if (debug)
+ device_remove_file(&pd->dev, &handles->devattr);
kfree(handles);
handles = NULL;
}
@@ -808,6 +813,11 @@ static int sony_nc_handles_cleanup(struct platform_device *pd)
static int sony_find_snc_handle(int handle)
{
int i;
+
+ /* not initialized yet, return early */
+ if (!handles)
+ return -1;
+
for (i = 0; i < 0x10; i++) {
if (handles->cap[i] == handle) {
dprintk("found handle 0x%.4x (offset: 0x%.2x)\n",
@@ -1168,6 +1178,9 @@ static int sony_nc_resume(struct acpi_device *device)
/* re-read rfkill state */
sony_nc_rfkill_update();
+ /* restore kbd backlight states */
+ sony_nc_kbd_backlight_resume();
+
return 0;
}
@@ -1355,6 +1368,7 @@ out_no_enum:
#define KBDBL_HANDLER 0x137
#define KBDBL_PRESENT 0xB00
#define SET_MODE 0xC00
+#define SET_STATE 0xD00
#define SET_TIMEOUT 0xE00
struct kbd_backlight {
@@ -1377,6 +1391,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
(value << 0x10) | SET_MODE, &result))
return -EIO;
+ /* Try to turn the light on/off immediately */
+ sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
+ &result);
+
kbdbl_handle->mode = value;
return 0;
@@ -1458,7 +1476,7 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
{
int result;
- if (sony_call_snc_handle(0x137, KBDBL_PRESENT, &result))
+ if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
return 0;
if (!(result & 0x02))
return 0;
@@ -1501,13 +1519,36 @@ outkzalloc:
static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
{
if (kbdbl_handle) {
+ int result;
+
device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+
+ /* restore the default hw behaviour */
+ sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
+ sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+
kfree(kbdbl_handle);
}
return 0;
}
+static void sony_nc_kbd_backlight_resume(void)
+{
+ int ignore = 0;
+
+ if (!kbdbl_handle)
+ return;
+
+ if (kbdbl_handle->mode == 0)
+ sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
+
+ if (kbdbl_handle->timeout != 0)
+ sony_call_snc_handle(KBDBL_HANDLER,
+ (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+ &ignore);
+}
+
static void sony_nc_backlight_setup(void)
{
acpi_handle unused;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a08561f5349e..efb3b6b9bcdb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8618,8 +8618,7 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
tpacpi_is_fw_digit(s[1]) &&
s[2] == t && s[3] == 'T' &&
tpacpi_is_fw_digit(s[4]) &&
- tpacpi_is_fw_digit(s[5]) &&
- s[6] == 'W' && s[7] == 'W';
+ tpacpi_is_fw_digit(s[5]);
}
/* returns 0 - probe ok, or < 0 - probe error.
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index c29719cacbca..86c9a091a2ff 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1171,16 +1171,17 @@ static int rio_hdid_setup(char *str)
__setup("riohdid=", rio_hdid_setup);
-void rio_register_mport(struct rio_mport *port)
+int rio_register_mport(struct rio_mport *port)
{
if (next_portid >= RIO_MAX_MPORTS) {
pr_err("RIO: reached specified max number of mports\n");
- return;
+ return 1;
}
port->id = next_portid++;
port->host_deviceid = rio_get_hdid(port->id);
list_add_tail(&port->node, &rio_mports);
+ return 0;
}
EXPORT_SYMBOL_GPL(rio_local_get_device_id);
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 095016a9dec1..ac2701b22e71 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -418,3 +418,4 @@ DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1432, idtg2_switch_init);
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 09b4437b3e61..39013867cbd6 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
err = __rtc_read_alarm(rtc, &alrm);
if (!err && !rtc_valid_tm(&alrm.time))
- rtc_set_alarm(rtc, &alrm);
+ rtc_initialize_alarm(rtc, &alrm);
strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
dev_set_name(&rtc->dev, "rtc%d", id);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 23719f0acbf6..ef6316acec43 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
+/* Called once per device from rtc_device_register */
+int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ err = rtc_valid_tm(&alarm->time);
+ if (err != 0)
+ return err;
+
+ err = mutex_lock_interruptible(&rtc->ops_lock);
+ if (err)
+ return err;
+
+ rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+ rtc->aie_timer.period = ktime_set(0, 0);
+ if (alarm->enabled) {
+ rtc->aie_timer.enabled = 1;
+ timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+ }
+ mutex_unlock(&rtc->ops_lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
+
+
+
int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
int err = mutex_lock_interruptible(&rtc->ops_lock);
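rtc_initialize_alarm() above exists so that rtc_device_register() (see the drivers/rtc/class.c hunk earlier) can seed the timer queue with whatever alarm the hardware already holds: unlike rtc_set_alarm(), it never touches the device and does not reject an alarm time that has already passed. A hedged usage sketch, mirroring the registration path with error handling trimmed:

/* Sketch: import the alarm the RTC was left with into the class layer. */
struct rtc_wkalrm alrm;
int err;

err = __rtc_read_alarm(rtc, &alrm);		/* read what the hardware holds */
if (!err && !rtc_valid_tm(&alrm.time))
	rtc_initialize_alarm(rtc, &alrm);	/* queue it without reprogramming */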
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index a0fc4cf42abf..90d866272c8e 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
bfin_rtc_int_set_alarm(rtc);
else
bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+ return 0;
}
static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 316f484999b5..80f9c88214c5 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -220,6 +220,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
}
clk_disable(rtap->clk);
+ platform_set_drvdata(pdev, rtap);
rtap->rtc = rtc_device_register("coh901331", &pdev->dev, &coh901331_ops,
THIS_MODULE);
if (IS_ERR(rtap->rtc)) {
@@ -227,11 +228,10 @@ static int __init coh901331_probe(struct platform_device *pdev)
goto out_no_rtc;
}
- platform_set_drvdata(pdev, rtap);
-
return 0;
out_no_rtc:
+ platform_set_drvdata(pdev, NULL);
out_no_clk_enable:
clk_put(rtap->clk);
out_no_clk:
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index c42006469559..c5ac03793e79 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -401,6 +401,7 @@ const struct platform_device_id mc13xxx_rtc_idtable[] = {
}, {
.name = "mc13892-rtc",
},
+ { }
};
static struct platform_driver mc13xxx_rtc_driver = {
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index de0dd7b1f146..bcae8dd41496 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -394,7 +394,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
return 0;
fail2:
- free_irq(omap_rtc_timer, NULL);
+ free_irq(omap_rtc_timer, rtc);
fail1:
rtc_device_unregister(rtc);
fail0:
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 714964913e5e..b3466c491cd3 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev)
/* do not clear AIE here, it may be needed for wake */
- s3c_rtc_setpie(dev, 0);
free_irq(s3c_rtc_alarmno, rtc_dev);
free_irq(s3c_rtc_tickno, rtc_dev);
}
@@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
platform_set_drvdata(dev, NULL);
rtc_device_unregister(rtc);
- s3c_rtc_setpie(&dev->dev, 0);
s3c_rtc_setaie(&dev->dev, 0);
clk_disable(rtc_clk);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index df14c51f6532..8e04c00cf0ad 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -541,15 +541,24 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
int force, ret;
unsigned long i;
- if (!dev_fsm_final_state(cdev) &&
- cdev->private->state != DEV_STATE_DISCONNECTED)
- return -EAGAIN;
+ /* Prevent conflict between multiple on-/offline processing requests. */
if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
+ /* Prevent conflict between internal I/Os and on-/offline processing. */
+ if (!dev_fsm_final_state(cdev) &&
+ cdev->private->state != DEV_STATE_DISCONNECTED) {
+ ret = -EAGAIN;
+ goto out_onoff;
+ }
+ /* Prevent conflict between pending work and on-/offline processing.*/
+ if (work_pending(&cdev->private->todo_work)) {
+ ret = -EAGAIN;
+ goto out_onoff;
+ }
if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
- atomic_set(&cdev->private->onoff, 0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_onoff;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
@@ -574,6 +583,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
out:
if (cdev->drv)
module_put(cdev->drv->driver.owner);
+out_onoff:
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
@@ -1311,10 +1321,12 @@ static int purge_fn(struct device *dev, void *data)
spin_lock_irq(cdev->ccwlock);
if (is_blacklisted(id->ssid, id->devno) &&
- (cdev->private->state == DEV_STATE_OFFLINE)) {
+ (cdev->private->state == DEV_STATE_OFFLINE) &&
+ (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
id->devno);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+ atomic_set(&cdev->private->onoff, 0);
}
spin_unlock_irq(cdev->ccwlock);
/* Abort loop in case of pending signal. */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 479c665e9e7c..c532ba929ccd 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1649,26 +1649,26 @@ static int __init init_QDIO(void)
{
int rc;
- rc = qdio_setup_init();
+ rc = qdio_debug_init();
if (rc)
return rc;
+ rc = qdio_setup_init();
+ if (rc)
+ goto out_debug;
rc = tiqdio_allocate_memory();
if (rc)
goto out_cache;
- rc = qdio_debug_init();
- if (rc)
- goto out_ti;
rc = tiqdio_register_thinints();
if (rc)
- goto out_debug;
+ goto out_ti;
return 0;
-out_debug:
- qdio_debug_exit();
out_ti:
tiqdio_free_memory();
out_cache:
qdio_setup_exit();
+out_debug:
+ qdio_debug_exit();
return rc;
}
@@ -1676,8 +1676,8 @@ static void __exit exit_QDIO(void)
{
tiqdio_unregister_thinints();
tiqdio_free_memory();
- qdio_debug_exit();
qdio_setup_exit();
+ qdio_debug_exit();
}
module_init(init_QDIO);
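The init_QDIO()/exit_QDIO() reshuffle restores the usual rule that teardown, including the error path, runs in exactly the reverse order of initialization, which is why qdio_debug_init() now comes first and qdio_debug_exit() last. The generic goto-ladder idiom, with illustrative init_a/init_b/init_c helpers:

/* Sketch: initialize in order a, b, c; unwind strictly in reverse. */
static int __init my_init(void)
{
	int rc;

	rc = init_a();
	if (rc)
		return rc;
	rc = init_b();
	if (rc)
		goto out_a;
	rc = init_c();
	if (rc)
		goto out_b;
	return 0;

out_b:
	exit_b();
out_a:
	exit_a();
	return rc;
}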
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d5c7ff43f5b..e9901b8f8443 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
list_splice_init(&shost->starved_list, &starved_list);
while (!list_empty(&starved_list)) {
- int flagset;
-
/*
* As long as shost is accepting commands and we have
* starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
continue;
}
- spin_unlock(shost->host_lock);
-
- spin_lock(sdev->request_queue->queue_lock);
- flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
- !test_bit(QUEUE_FLAG_REENTER,
- &sdev->request_queue->queue_flags);
- if (flagset)
- queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
- __blk_run_queue(sdev->request_queue, false);
- if (flagset)
- queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
- spin_unlock(sdev->request_queue->queue_lock);
-
- spin_lock(shost->host_lock);
+ blk_run_queue_async(sdev->request_queue);
}
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
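This hunk, and the scsi_transport_fc.c one below, replaces the open-coded QUEUE_FLAG_REENTER dance (drop the host lock, take the queue lock, set the flag, run the queue, clear the flag, relock the host) with blk_run_queue_async(), which only schedules the queue to be run later from the block layer's kblockd worker and is therefore safe to call while still holding other spinlocks. A hedged sketch of the calling convention:

/* Sketch (assumes <linux/blkdev.h>): kick a request queue from atomic
 * context; the queue is actually run later from kblockd. */
spin_lock(shost->host_lock);
blk_run_queue_async(sdev->request_queue);	/* no sleeping, no recursion into the queue */
spin_unlock(shost->host_lock);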
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index fdf3fa639056..815069d13f9b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
- int flagset;
- unsigned long flags;
-
if (!rport->rqst_q)
return;
+ /*
+ * This get/put dance makes no sense
+ */
get_device(&rport->dev);
-
- spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
- flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
- !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
- if (flagset)
- queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
- __blk_run_queue(rport->rqst_q, false);
- if (flagset)
- queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
- spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
-
+ blk_run_queue_async(rport->rqst_q);
put_device(&rport->dev);
}
-
/**
* fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
* @q: rport request queue
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 5825370bad25..08de58e7f59f 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1555,7 +1555,7 @@ static int stop_queue(struct pl022 *pl022)
* A wait_queue on the pl022->busy could be used, but then the common
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead */
- while (!list_empty(&pl022->queue) && pl022->busy && limit--) {
+ while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) {
spin_unlock_irqrestore(&pl022->queue_lock, flags);
msleep(10);
spin_lock_irqsave(&pl022->queue_lock, flags);
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index b1a4b9f503ae..871e337c917f 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -821,7 +821,7 @@ static int stop_queue(struct dw_spi *dws)
spin_lock_irqsave(&dws->lock, flags);
dws->run = QUEUE_STOPPED;
- while (!list_empty(&dws->queue) && dws->busy && limit--) {
+ while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
spin_unlock_irqrestore(&dws->lock, flags);
msleep(10);
spin_lock_irqsave(&dws->lock, flags);
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9c74aad6be93..dc25bee8d33f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1493,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data)
* execution path (pump_messages) would be required to call wake_up or
* friends on every SPI message. Do this instead */
drv_data->run = QUEUE_STOPPED;
- while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
spin_unlock_irqrestore(&drv_data->lock, flags);
msleep(10);
spin_lock_irqsave(&drv_data->lock, flags);
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index bdb7289a1d22..f706dba165cf 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1284,7 +1284,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
* friends on every SPI message. Do this instead
*/
drv_data->running = false;
- while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
spin_unlock_irqrestore(&drv_data->lock, flags);
msleep(10);
spin_lock_irqsave(&drv_data->lock, flags);
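The four SPI hunks above (amba-pl022, dw_spi, pxa2xx_spi, spi_bfin5xx) fix the same logic error in their stop_queue() paths: with '&&' the wait loop exits as soon as either the message list is empty or the controller is idle, so the function could return while a transfer was still in flight; with '||' it keeps polling until both conditions are false or the retry limit expires. The corrected drain loop in isolation, with illustrative field names:

/* Sketch: wait until the queue is empty AND the hardware is idle,
 * i.e. keep sleeping while either is still true, bounded by 'limit'. */
unsigned long flags;
int limit = 500;

spin_lock_irqsave(&drv->lock, flags);
drv->running = false;				/* stop feeding new messages */
while ((!list_empty(&drv->queue) || drv->busy) && limit--) {
	spin_unlock_irqrestore(&drv->lock, flags);
	msleep(10);				/* let the current transfer finish */
	spin_lock_irqsave(&drv->lock, flags);
}
spin_unlock_irqrestore(&drv->lock, flags);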
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dca4a0bb6ca9..e3786f161bc3 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -131,8 +131,6 @@ source "drivers/staging/wlags49_h2/Kconfig"
source "drivers/staging/wlags49_h25/Kconfig"
-source "drivers/staging/samsung-laptop/Kconfig"
-
source "drivers/staging/sm7xx/Kconfig"
source "drivers/staging/dt3155v4l/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index eb93012b6f59..f0d5c5315612 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_XVMALLOC) += zram/
obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
obj-$(CONFIG_WLAGS49_H25) += wlags49_h25/
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop/
obj-$(CONFIG_FB_SM7XX) += sm7xx/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_CRYSTALHD) += crystalhd/
diff --git a/drivers/staging/samsung-laptop/Kconfig b/drivers/staging/samsung-laptop/Kconfig
deleted file mode 100644
index f27c60864c26..000000000000
--- a/drivers/staging/samsung-laptop/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config SAMSUNG_LAPTOP
- tristate "Samsung Laptop driver"
- default n
- depends on RFKILL && BACKLIGHT_CLASS_DEVICE && X86
- help
- This module implements a driver for the N128 Samsung Laptop
- providing control over the Wireless LED and the LCD backlight
-
- To compile this driver as a module, choose
- M here: the module will be called samsung-laptop.
diff --git a/drivers/staging/samsung-laptop/Makefile b/drivers/staging/samsung-laptop/Makefile
deleted file mode 100644
index 3c6f42045211..000000000000
--- a/drivers/staging/samsung-laptop/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
diff --git a/drivers/staging/samsung-laptop/TODO b/drivers/staging/samsung-laptop/TODO
deleted file mode 100644
index f7a6d589916e..000000000000
--- a/drivers/staging/samsung-laptop/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-TODO:
- - review from other developers
- - figure out ACPI video issues
-
-Please send patches to Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
deleted file mode 100644
index 25294462b8b6..000000000000
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ /dev/null
@@ -1,843 +0,0 @@
-/*
- * Samsung Laptop driver
- *
- * Copyright (C) 2009,2011 Greg Kroah-Hartman (gregkh@suse.de)
- * Copyright (C) 2009,2011 Novell Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/dmi.h>
-#include <linux/platform_device.h>
-#include <linux/rfkill.h>
-
-/*
- * This driver is needed because a number of Samsung laptops do not hook
- * their control settings through ACPI. So we have to poke around in the
- * BIOS to do things like brightness values, and "special" key controls.
- */
-
-/*
- * We have 0 - 8 as valid brightness levels. The specs say that level 0 should
- * be reserved by the BIOS (which really doesn't make much sense), we tell
- * userspace that the value is 0 - 7 and then just tell the hardware 1 - 8
- */
-#define MAX_BRIGHT 0x07
-
-
-#define SABI_IFACE_MAIN 0x00
-#define SABI_IFACE_SUB 0x02
-#define SABI_IFACE_COMPLETE 0x04
-#define SABI_IFACE_DATA 0x05
-
-/* Structure to get data back to the calling function */
-struct sabi_retval {
- u8 retval[20];
-};
-
-struct sabi_header_offsets {
- u8 port;
- u8 re_mem;
- u8 iface_func;
- u8 en_mem;
- u8 data_offset;
- u8 data_segment;
-};
-
-struct sabi_commands {
- /*
- * Brightness is 0 - 8, as described above.
- * Value 0 is for the BIOS to use
- */
- u8 get_brightness;
- u8 set_brightness;
-
- /*
- * first byte:
- * 0x00 - wireless is off
- * 0x01 - wireless is on
- * second byte:
- * 0x02 - 3G is off
- * 0x03 - 3G is on
- * TODO, verify 3G is correct, that doesn't seem right...
- */
- u8 get_wireless_button;
- u8 set_wireless_button;
-
- /* 0 is off, 1 is on */
- u8 get_backlight;
- u8 set_backlight;
-
- /*
- * 0x80 or 0x00 - no action
- * 0x81 - recovery key pressed
- */
- u8 get_recovery_mode;
- u8 set_recovery_mode;
-
- /*
- * on seclinux: 0 is low, 1 is high,
- * on swsmi: 0 is normal, 1 is silent, 2 is turbo
- */
- u8 get_performance_level;
- u8 set_performance_level;
-
- /*
- * Tell the BIOS that Linux is running on this machine.
- * 81 is on, 80 is off
- */
- u8 set_linux;
-};
-
-struct sabi_performance_level {
- const char *name;
- u8 value;
-};
-
-struct sabi_config {
- const char *test_string;
- u16 main_function;
- const struct sabi_header_offsets header_offsets;
- const struct sabi_commands commands;
- const struct sabi_performance_level performance_levels[4];
- u8 min_brightness;
- u8 max_brightness;
-};
-
-static const struct sabi_config sabi_configs[] = {
- {
- .test_string = "SECLINUX",
-
- .main_function = 0x4c49,
-
- .header_offsets = {
- .port = 0x00,
- .re_mem = 0x02,
- .iface_func = 0x03,
- .en_mem = 0x04,
- .data_offset = 0x05,
- .data_segment = 0x07,
- },
-
- .commands = {
- .get_brightness = 0x00,
- .set_brightness = 0x01,
-
- .get_wireless_button = 0x02,
- .set_wireless_button = 0x03,
-
- .get_backlight = 0x04,
- .set_backlight = 0x05,
-
- .get_recovery_mode = 0x06,
- .set_recovery_mode = 0x07,
-
- .get_performance_level = 0x08,
- .set_performance_level = 0x09,
-
- .set_linux = 0x0a,
- },
-
- .performance_levels = {
- {
- .name = "silent",
- .value = 0,
- },
- {
- .name = "normal",
- .value = 1,
- },
- { },
- },
- .min_brightness = 1,
- .max_brightness = 8,
- },
- {
- .test_string = "SwSmi@",
-
- .main_function = 0x5843,
-
- .header_offsets = {
- .port = 0x00,
- .re_mem = 0x04,
- .iface_func = 0x02,
- .en_mem = 0x03,
- .data_offset = 0x05,
- .data_segment = 0x07,
- },
-
- .commands = {
- .get_brightness = 0x10,
- .set_brightness = 0x11,
-
- .get_wireless_button = 0x12,
- .set_wireless_button = 0x13,
-
- .get_backlight = 0x2d,
- .set_backlight = 0x2e,
-
- .get_recovery_mode = 0xff,
- .set_recovery_mode = 0xff,
-
- .get_performance_level = 0x31,
- .set_performance_level = 0x32,
-
- .set_linux = 0xff,
- },
-
- .performance_levels = {
- {
- .name = "normal",
- .value = 0,
- },
- {
- .name = "silent",
- .value = 1,
- },
- {
- .name = "overclock",
- .value = 2,
- },
- { },
- },
- .min_brightness = 0,
- .max_brightness = 8,
- },
- { },
-};
-
-static const struct sabi_config *sabi_config;
-
-static void __iomem *sabi;
-static void __iomem *sabi_iface;
-static void __iomem *f0000_segment;
-static struct backlight_device *backlight_device;
-static struct mutex sabi_mutex;
-static struct platform_device *sdev;
-static struct rfkill *rfk;
-
-static int force;
-module_param(force, bool, 0);
-MODULE_PARM_DESC(force,
- "Disable the DMI check and forces the driver to be loaded");
-
-static int debug;
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
-static int sabi_get_command(u8 command, struct sabi_retval *sretval)
-{
- int retval = 0;
- u16 port = readw(sabi + sabi_config->header_offsets.port);
- u8 complete, iface_data;
-
- mutex_lock(&sabi_mutex);
-
- /* enable memory to be able to write to it */
- outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
- /* write out the command */
- writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
- writew(command, sabi_iface + SABI_IFACE_SUB);
- writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
- outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
- /* write protect memory to make it safe */
- outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
- /* see if the command actually succeeded */
- complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
- iface_data = readb(sabi_iface + SABI_IFACE_DATA);
- if (complete != 0xaa || iface_data == 0xff) {
- pr_warn("SABI get command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
- command, complete, iface_data);
- retval = -EINVAL;
- goto exit;
- }
- /*
- * Save off the data into a structure so the caller use it.
- * Right now we only want the first 4 bytes,
- * There are commands that need more, but not for the ones we
- * currently care about.
- */
- sretval->retval[0] = readb(sabi_iface + SABI_IFACE_DATA);
- sretval->retval[1] = readb(sabi_iface + SABI_IFACE_DATA + 1);
- sretval->retval[2] = readb(sabi_iface + SABI_IFACE_DATA + 2);
- sretval->retval[3] = readb(sabi_iface + SABI_IFACE_DATA + 3);
-
-exit:
- mutex_unlock(&sabi_mutex);
- return retval;
-
-}
-
-static int sabi_set_command(u8 command, u8 data)
-{
- int retval = 0;
- u16 port = readw(sabi + sabi_config->header_offsets.port);
- u8 complete, iface_data;
-
- mutex_lock(&sabi_mutex);
-
- /* enable memory to be able to write to it */
- outb(readb(sabi + sabi_config->header_offsets.en_mem), port);
-
- /* write out the command */
- writew(sabi_config->main_function, sabi_iface + SABI_IFACE_MAIN);
- writew(command, sabi_iface + SABI_IFACE_SUB);
- writeb(0, sabi_iface + SABI_IFACE_COMPLETE);
- writeb(data, sabi_iface + SABI_IFACE_DATA);
- outb(readb(sabi + sabi_config->header_offsets.iface_func), port);
-
- /* write protect memory to make it safe */
- outb(readb(sabi + sabi_config->header_offsets.re_mem), port);
-
- /* see if the command actually succeeded */
- complete = readb(sabi_iface + SABI_IFACE_COMPLETE);
- iface_data = readb(sabi_iface + SABI_IFACE_DATA);
- if (complete != 0xaa || iface_data == 0xff) {
- pr_warn("SABI set command 0x%02x failed with completion flag 0x%02x and data 0x%02x\n",
- command, complete, iface_data);
- retval = -EINVAL;
- }
-
- mutex_unlock(&sabi_mutex);
- return retval;
-}
-
-static void test_backlight(void)
-{
- struct sabi_retval sretval;
-
- sabi_get_command(sabi_config->commands.get_backlight, &sretval);
- printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
- sabi_set_command(sabi_config->commands.set_backlight, 0);
- printk(KERN_DEBUG "backlight should be off\n");
-
- sabi_get_command(sabi_config->commands.get_backlight, &sretval);
- printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-
- msleep(1000);
-
- sabi_set_command(sabi_config->commands.set_backlight, 1);
- printk(KERN_DEBUG "backlight should be on\n");
-
- sabi_get_command(sabi_config->commands.get_backlight, &sretval);
- printk(KERN_DEBUG "backlight = 0x%02x\n", sretval.retval[0]);
-}
-
-static void test_wireless(void)
-{
- struct sabi_retval sretval;
-
- sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
- printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
- sabi_set_command(sabi_config->commands.set_wireless_button, 0);
- printk(KERN_DEBUG "wireless led should be off\n");
-
- sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
- printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-
- msleep(1000);
-
- sabi_set_command(sabi_config->commands.set_wireless_button, 1);
- printk(KERN_DEBUG "wireless led should be on\n");
-
- sabi_get_command(sabi_config->commands.get_wireless_button, &sretval);
- printk(KERN_DEBUG "wireless led = 0x%02x\n", sretval.retval[0]);
-}
-
-static u8 read_brightness(void)
-{
- struct sabi_retval sretval;
- int user_brightness = 0;
- int retval;
-
- retval = sabi_get_command(sabi_config->commands.get_brightness,
- &sretval);
- if (!retval) {
- user_brightness = sretval.retval[0];
- if (user_brightness != 0)
- user_brightness -= sabi_config->min_brightness;
- }
- return user_brightness;
-}
-
-static void set_brightness(u8 user_brightness)
-{
- u8 user_level = user_brightness - sabi_config->min_brightness;
-
- sabi_set_command(sabi_config->commands.set_brightness, user_level);
-}
-
-static int get_brightness(struct backlight_device *bd)
-{
- return (int)read_brightness();
-}
-
-static int update_status(struct backlight_device *bd)
-{
- set_brightness(bd->props.brightness);
-
- if (bd->props.power == FB_BLANK_UNBLANK)
- sabi_set_command(sabi_config->commands.set_backlight, 1);
- else
- sabi_set_command(sabi_config->commands.set_backlight, 0);
- return 0;
-}
-
-static const struct backlight_ops backlight_ops = {
- .get_brightness = get_brightness,
- .update_status = update_status,
-};
-
-static int rfkill_set(void *data, bool blocked)
-{
- /* Do something with blocked...*/
- /*
- * blocked == false is on
- * blocked == true is off
- */
- if (blocked)
- sabi_set_command(sabi_config->commands.set_wireless_button, 0);
- else
- sabi_set_command(sabi_config->commands.set_wireless_button, 1);
-
- return 0;
-}
-
-static struct rfkill_ops rfkill_ops = {
- .set_block = rfkill_set,
-};
-
-static int init_wireless(struct platform_device *sdev)
-{
- int retval;
-
- rfk = rfkill_alloc("samsung-wifi", &sdev->dev, RFKILL_TYPE_WLAN,
- &rfkill_ops, NULL);
- if (!rfk)
- return -ENOMEM;
-
- retval = rfkill_register(rfk);
- if (retval) {
- rfkill_destroy(rfk);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void destroy_wireless(void)
-{
- rfkill_unregister(rfk);
- rfkill_destroy(rfk);
-}
-
-static ssize_t get_performance_level(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct sabi_retval sretval;
- int retval;
- int i;
-
- /* Read the state */
- retval = sabi_get_command(sabi_config->commands.get_performance_level,
- &sretval);
- if (retval)
- return retval;
-
- /* The logic is backwards, yeah, lots of fun... */
- for (i = 0; sabi_config->performance_levels[i].name; ++i) {
- if (sretval.retval[0] == sabi_config->performance_levels[i].value)
- return sprintf(buf, "%s\n", sabi_config->performance_levels[i].name);
- }
- return sprintf(buf, "%s\n", "unknown");
-}
-
-static ssize_t set_performance_level(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- if (count >= 1) {
- int i;
- for (i = 0; sabi_config->performance_levels[i].name; ++i) {
- const struct sabi_performance_level *level =
- &sabi_config->performance_levels[i];
- if (!strncasecmp(level->name, buf, strlen(level->name))) {
- sabi_set_command(sabi_config->commands.set_performance_level,
- level->value);
- break;
- }
- }
- if (!sabi_config->performance_levels[i].name)
- return -EINVAL;
- }
- return count;
-}
-static DEVICE_ATTR(performance_level, S_IWUSR | S_IRUGO,
- get_performance_level, set_performance_level);
-
-
-static int __init dmi_check_cb(const struct dmi_system_id *id)
-{
- pr_info("found laptop model '%s'\n",
- id->ident);
- return 0;
-}
-
-static struct dmi_system_id __initdata samsung_dmi_table[] = {
- {
- .ident = "N128",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N128"),
- DMI_MATCH(DMI_BOARD_NAME, "N128"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N130",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N130"),
- DMI_MATCH(DMI_BOARD_NAME, "N130"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "X125",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X125"),
- DMI_MATCH(DMI_BOARD_NAME, "X125"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "X120/X170",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X120/X170"),
- DMI_MATCH(DMI_BOARD_NAME, "X120/X170"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "NC10",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NC10"),
- DMI_MATCH(DMI_BOARD_NAME, "NC10"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "NP-Q45",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "SQ45S70S"),
- DMI_MATCH(DMI_BOARD_NAME, "SQ45S70S"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "X360",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
- DMI_MATCH(DMI_BOARD_NAME, "X360"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R410 Plus",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R410P"),
- DMI_MATCH(DMI_BOARD_NAME, "R460"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R518",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R518"),
- DMI_MATCH(DMI_BOARD_NAME, "R518"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R519/R719",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R519/R719"),
- DMI_MATCH(DMI_BOARD_NAME, "R519/R719"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N150/N210/N220/N230",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220/N230"),
- DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220/N230"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N150P/N210P/N220P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N150P/N210P/N220P"),
- DMI_MATCH(DMI_BOARD_NAME, "N150P/N210P/N220P"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R530/R730",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R530/R730"),
- DMI_MATCH(DMI_BOARD_NAME, "R530/R730"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "NF110/NF210/NF310",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "NF110/NF210/NF310"),
- DMI_MATCH(DMI_BOARD_NAME, "NF110/NF210/NF310"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "N145P/N250P/N260P",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
- DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "R70/R71",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR,
- "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "R70/R71"),
- DMI_MATCH(DMI_BOARD_NAME, "R70/R71"),
- },
- .callback = dmi_check_cb,
- },
- {
- .ident = "P460",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
- DMI_MATCH(DMI_PRODUCT_NAME, "P460"),
- DMI_MATCH(DMI_BOARD_NAME, "P460"),
- },
- .callback = dmi_check_cb,
- },
- { },
-};
-MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
-
-static int find_signature(void __iomem *memcheck, const char *testStr)
-{
- int i = 0;
- int loca;
-
- for (loca = 0; loca < 0xffff; loca++) {
- char temp = readb(memcheck + loca);
-
- if (temp == testStr[i]) {
- if (i == strlen(testStr)-1)
- break;
- ++i;
- } else {
- i = 0;
- }
- }
- return loca;
-}
-
-static int __init samsung_init(void)
-{
- struct backlight_properties props;
- struct sabi_retval sretval;
- unsigned int ifaceP;
- int i;
- int loca;
- int retval;
-
- mutex_init(&sabi_mutex);
-
- if (!force && !dmi_check_system(samsung_dmi_table))
- return -ENODEV;
-
- f0000_segment = ioremap_nocache(0xf0000, 0xffff);
- if (!f0000_segment) {
- pr_err("Can't map the segment at 0xf0000\n");
- return -EINVAL;
- }
-
- /* Try to find one of the signatures in memory to find the header */
- for (i = 0; sabi_configs[i].test_string != 0; ++i) {
- sabi_config = &sabi_configs[i];
- loca = find_signature(f0000_segment, sabi_config->test_string);
- if (loca != 0xffff)
- break;
- }
-
- if (loca == 0xffff) {
- pr_err("This computer does not support SABI\n");
- goto error_no_signature;
- }
-
- /* point to the SMI port Number */
- loca += 1;
- sabi = (f0000_segment + loca);
-
- if (debug) {
- printk(KERN_DEBUG "This computer supports SABI==%x\n",
- loca + 0xf0000 - 6);
- printk(KERN_DEBUG "SABI header:\n");
- printk(KERN_DEBUG " SMI Port Number = 0x%04x\n",
- readw(sabi + sabi_config->header_offsets.port));
- printk(KERN_DEBUG " SMI Interface Function = 0x%02x\n",
- readb(sabi + sabi_config->header_offsets.iface_func));
- printk(KERN_DEBUG " SMI enable memory buffer = 0x%02x\n",
- readb(sabi + sabi_config->header_offsets.en_mem));
- printk(KERN_DEBUG " SMI restore memory buffer = 0x%02x\n",
- readb(sabi + sabi_config->header_offsets.re_mem));
- printk(KERN_DEBUG " SABI data offset = 0x%04x\n",
- readw(sabi + sabi_config->header_offsets.data_offset));
- printk(KERN_DEBUG " SABI data segment = 0x%04x\n",
- readw(sabi + sabi_config->header_offsets.data_segment));
- }
-
- /* Get a pointer to the SABI Interface */
- ifaceP = (readw(sabi + sabi_config->header_offsets.data_segment) & 0x0ffff) << 4;
- ifaceP += readw(sabi + sabi_config->header_offsets.data_offset) & 0x0ffff;
- sabi_iface = ioremap_nocache(ifaceP, 16);
- if (!sabi_iface) {
- pr_err("Can't remap %x\n", ifaceP);
- goto exit;
- }
- if (debug) {
- printk(KERN_DEBUG "ifaceP = 0x%08x\n", ifaceP);
- printk(KERN_DEBUG "sabi_iface = %p\n", sabi_iface);
-
- test_backlight();
- test_wireless();
-
- retval = sabi_get_command(sabi_config->commands.get_brightness,
- &sretval);
- printk(KERN_DEBUG "brightness = 0x%02x\n", sretval.retval[0]);
- }
-
- /* Turn on "Linux" mode in the BIOS */
- if (sabi_config->commands.set_linux != 0xff) {
- retval = sabi_set_command(sabi_config->commands.set_linux,
- 0x81);
- if (retval) {
- pr_warn("Linux mode was not set!\n");
- goto error_no_platform;
- }
- }
-
- /* knock up a platform device to hang stuff off of */
- sdev = platform_device_register_simple("samsung", -1, NULL, 0);
- if (IS_ERR(sdev))
- goto error_no_platform;
-
- /* create a backlight device to talk to this one */
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = sabi_config->max_brightness;
- backlight_device = backlight_device_register("samsung", &sdev->dev,
- NULL, &backlight_ops,
- &props);
- if (IS_ERR(backlight_device))
- goto error_no_backlight;
-
- backlight_device->props.brightness = read_brightness();
- backlight_device->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(backlight_device);
-
- retval = init_wireless(sdev);
- if (retval)
- goto error_no_rfk;
-
- retval = device_create_file(&sdev->dev, &dev_attr_performance_level);
- if (retval)
- goto error_file_create;
-
-exit:
- return 0;
-
-error_file_create:
- destroy_wireless();
-
-error_no_rfk:
- backlight_device_unregister(backlight_device);
-
-error_no_backlight:
- platform_device_unregister(sdev);
-
-error_no_platform:
- iounmap(sabi_iface);
-
-error_no_signature:
- iounmap(f0000_segment);
- return -EINVAL;
-}
-
-static void __exit samsung_exit(void)
-{
- /* Turn off "Linux" mode in the BIOS */
- if (sabi_config->commands.set_linux != 0xff)
- sabi_set_command(sabi_config->commands.set_linux, 0x80);
-
- device_remove_file(&sdev->dev, &dev_attr_performance_level);
- backlight_device_unregister(backlight_device);
- destroy_wireless();
- iounmap(sabi_iface);
- iounmap(f0000_segment);
- platform_device_unregister(sdev);
-}
-
-module_init(samsung_init);
-module_exit(samsung_exit);
-
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-MODULE_DESCRIPTION("Samsung Backlight driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 47f8cdb207f1..74273e638c0d 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1658,8 +1658,12 @@ static void gsm_queue(struct gsm_mux *gsm)
if ((gsm->control & ~PF) == UI)
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
- /* generate final CRC with received FCS */
- gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+ if (gsm->encoding == 0) {
+ /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
+ In this case it contains the last piece of data
+ required to generate the final CRC */
+ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
+ }
if (gsm->fcs != GOOD_FCS) {
gsm->bad_fcs++;
if (debug & 4)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index cb36b0d4ef3c..62df72d9f0aa 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -382,12 +382,13 @@ static void imx_start_tx(struct uart_port *port)
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
+ unsigned int val;
unsigned long flags;
spin_lock_irqsave(&sport->port.lock, flags);
writel(USR1_RTSD, sport->port.membase + USR1);
+ val = readl(sport->port.membase + USR1) & USR1_RTSS;
uart_handle_cts_change(&sport->port, !!val);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 41b6e51188e4..006489d82dc3 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_VT8500
default y if PLAT_SPEAR
default y if ARCH_MSM
+ default y if MICROBLAZE
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index a3d2e2399655..96fdfb815f89 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
break;
case USB_ENDPOINT_XFER_INT:
type = "Int.";
- if (speed == USB_SPEED_HIGH)
+ if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
interval = 1 << (desc->bInterval - 1);
else
interval = desc->bInterval;
@@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
default: /* "can't happen" */
return start;
}
- interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000;
+ interval *= (speed == USB_SPEED_HIGH ||
+ speed == USB_SPEED_SUPER) ? 125 : 1000;
if (interval % 1000)
unit = 'u';
else {
@@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
if (level == 0) {
int max;
- /* high speed reserves 80%, full/low reserves 90% */
- if (usbdev->speed == USB_SPEED_HIGH)
+ /* super/high speed reserves 80%, full/low reserves 90% */
+ if (usbdev->speed == USB_SPEED_HIGH ||
+ usbdev->speed == USB_SPEED_SUPER)
max = 800;
else
max = FRAME_TIME_MAX_USECS_ALLOC;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8eed05d23838..77a7faec8d78 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface,
/* Streams only apply to bulk endpoints. */
for (i = 0; i < num_eps; i++)
- if (!usb_endpoint_xfer_bulk(&eps[i]->desc))
+ if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
return;
hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8fb754916c67..93720bdc9efd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
}
/* see 7.1.7.6 */
- status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND);
+ /* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
+ * external hub.
+ * FIXME: this is a temporary workaround to make the system able
+ * to suspend/resume.
+ */
+ if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
+ status = clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_POWER);
+ else
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 9abecfddb27d..0111f8a9cf7f 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
struct f_audio *audio = func_to_audio(f);
usb_free_descriptors(f->descriptors);
+ usb_free_descriptors(f->hs_descriptors);
kfree(audio);
}
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 95dd4662d6a8..b3c304290150 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f)
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
+ struct sk_buff *skb = (struct sk_buff *)req->context;
+
+ dev_kfree_skb_any(skb);
}
/*
@@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port,
skb_trim(skb2, len);
put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
- skb_copy_bits(skb, 0, req->buf, skb->len);
- req->length = skb->len;
+ skb_copy_bits(skb2, 0, req->buf, skb2->len);
+ req->length = skb2->len;
req->complete = eem_cmd_complete;
req->zero = 1;
+ req->context = skb2;
if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
DBG(cdev, "echo response queue fail\n");
break;
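The f_eem fix has two parts: the echo response must be copied out of skb2 (the reply just built), not out of the original skb, and that reply has to stay alive until the USB request completes, so it is stashed in req->context and freed from the completion callback. The stash-and-free pattern, sketched with an illustrative handler name:

/* Sketch: keep a buffer alive across an asynchronous usb_ep_queue()
 * and release it from the completion handler via req->context. */
static void my_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	dev_kfree_skb_any((struct sk_buff *)req->context);
}

/* at submit time: */
req->context = skb2;
req->complete = my_cmd_complete;
if (usb_ep_queue(ep, req, GFP_ATOMIC))
	dev_kfree_skb_any(skb2);	/* queueing failed, so free it here instead */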
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index aee7e3c53c38..36613b37c504 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
static int txcomplete(struct qe_ep *ep, unsigned char restart)
{
if (ep->tx_req != NULL) {
+ struct qe_req *req = ep->tx_req;
+ unsigned zlp = 0, last_len = 0;
+
+ last_len = min_t(unsigned, req->req.length - ep->sent,
+ ep->ep.maxpacket);
+
if (!restart) {
int asent = ep->last;
ep->sent += asent;
@@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart)
ep->last = 0;
}
+ /* zlp needed when req->req.zero is set */
+ if (req->req.zero) {
+ if (last_len == 0 ||
+ (req->req.length % ep->ep.maxpacket) != 0)
+ zlp = 0;
+ else
+ zlp = 1;
+ } else
+ zlp = 0;
+
/* a request already were transmitted completely */
- if ((ep->tx_req->req.length - ep->sent) <= 0) {
- ep->tx_req->req.actual = (unsigned int)ep->sent;
+ if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
done(ep, ep->tx_req, 0);
ep->tx_req = NULL;
ep->last = 0;
@@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
buf = (u8 *)ep->tx_req->req.buf + ep->sent;
if (buf && size) {
ep->last = size;
+ ep->tx_req->req.actual += size;
frame_set_data(frame, buf);
frame_set_length(frame, size);
frame_set_status(frame, FRAME_OK);
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 3ed73f49cf18..a01383f71f38 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
/* halt any endpoint by doing a "wrong direction" i/o call */
if (usb_endpoint_dir_in(&data->desc)) {
- if (usb_endpoint_xfer_isoc(&data->desc))
+ if (usb_endpoint_xfer_isoc(&data->desc)) {
+ mutex_unlock(&data->lock);
return -EINVAL;
+ }
DBG (data->dev, "%s halt\n", data->name);
spin_lock_irq (&data->dev->lock);
if (likely (data->ep != NULL))
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 3e4b35e50c24..68dbcc3e4cc2 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
return -EINVAL;
if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
return -ESHUTDOWN;
- spin_lock_irqsave(&ep->dev->lock, iflags);
+ spin_lock_irqsave(&dev->lock, iflags);
/* map the buffer for dma */
if (usbreq->length &&
((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
@@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
DMA_FROM_DEVICE);
} else {
req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
- if (!req->buf)
- return -ENOMEM;
+ if (!req->buf) {
+ retval = -ENOMEM;
+ goto probe_end;
+ }
if (ep->in) {
memcpy(req->buf, usbreq->buf, usbreq->length);
req->dma = dma_map_single(&dev->pdev->dev,
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 015118535f77..6dcc1f68fa60 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597)
if (dvsq == DS_DFLT) {
/* bus reset */
+ spin_unlock(&r8a66597->lock);
r8a66597->driver->disconnect(&r8a66597->gadget);
+ spin_lock(&r8a66597->lock);
r8a66597_update_usb_speed(r8a66597);
}
if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 98ded66e8d3f..42abd0f603bf 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
static void scan_async (struct ehci_hcd *ehci)
{
+ bool stopped;
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
+ stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
qh = ehci->async->qh_next.qh;
if (likely (qh != NULL)) {
do {
/* clean any finished work for this qh */
- if (!list_empty (&qh->qtd_list)
- && qh->stamp != ehci->stamp) {
+ if (!list_empty(&qh->qtd_list) && (stopped ||
+ qh->stamp != ehci->stamp)) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
- * qhs we already finished (no looping).
+ * qhs we already finished (no looping)
+ * unless the controller is stopped.
*/
qh = qh_get (qh);
qh->stamp = ehci->stamp;
@@ -1285,9 +1288,9 @@ rescan:
*/
if (list_empty(&qh->qtd_list)
&& qh->qh_state == QH_STATE_LINKED) {
- if (!ehci->reclaim
- && ((ehci->stamp - qh->stamp) & 0x1fff)
- >= (EHCI_SHRINK_FRAMES * 8))
+ if (!ehci->reclaim && (stopped ||
+ ((ehci->stamp - qh->stamp) & 0x1fff)
+ >= EHCI_SHRINK_FRAMES * 8))
start_unlink_async(ehci, qh);
else
action = TIMER_ASYNC_SHRINK;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index f50e84ac570a..795345ad45e6 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
}
dev_err(hcd->self.controller,
- "%s: Can not allocate %lu bytes of memory\n"
+ "%s: Cannot allocate %zu bytes of memory\n"
"Current memory map:\n",
__func__, qtd->length);
for (i = 0; i < BLOCKS; i++) {
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 17a6043c1fa0..958d985f2951 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -33,7 +33,7 @@
#ifdef __LITTLE_ENDIAN
#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
-#elif __BIG_ENDIAN
+#elif defined(__BIG_ENDIAN)
#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
USBH_ENABLE_BE)
#else
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1d586d4f7b56..9b166d70ae91 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void)
{
u8 rev = 0;
unsigned long flags;
+ struct amd_chipset_info info;
+ int ret;
spin_lock_irqsave(&amd_lock, flags);
- amd_chipset.probe_count++;
/* probe only once */
- if (amd_chipset.probe_count > 1) {
+ if (amd_chipset.probe_count > 0) {
+ amd_chipset.probe_count++;
spin_unlock_irqrestore(&amd_lock, flags);
return amd_chipset.probe_result;
}
+ memset(&info, 0, sizeof(info));
+ spin_unlock_irqrestore(&amd_lock, flags);
- amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
- if (amd_chipset.smbus_dev) {
- rev = amd_chipset.smbus_dev->revision;
+ info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
+ if (info.smbus_dev) {
+ rev = info.smbus_dev->revision;
if (rev >= 0x40)
- amd_chipset.sb_type = 1;
+ info.sb_type = 1;
else if (rev >= 0x30 && rev <= 0x3b)
- amd_chipset.sb_type = 3;
+ info.sb_type = 3;
} else {
- amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x780b, NULL);
- if (!amd_chipset.smbus_dev) {
- spin_unlock_irqrestore(&amd_lock, flags);
- return 0;
+ info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x780b, NULL);
+ if (!info.smbus_dev) {
+ ret = 0;
+ goto commit;
}
- rev = amd_chipset.smbus_dev->revision;
+
+ rev = info.smbus_dev->revision;
if (rev >= 0x11 && rev <= 0x18)
- amd_chipset.sb_type = 2;
+ info.sb_type = 2;
}
- if (amd_chipset.sb_type == 0) {
- if (amd_chipset.smbus_dev) {
- pci_dev_put(amd_chipset.smbus_dev);
- amd_chipset.smbus_dev = NULL;
+ if (info.sb_type == 0) {
+ if (info.smbus_dev) {
+ pci_dev_put(info.smbus_dev);
+ info.smbus_dev = NULL;
}
- spin_unlock_irqrestore(&amd_lock, flags);
- return 0;
+ ret = 0;
+ goto commit;
}
- amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
- if (amd_chipset.nb_dev) {
- amd_chipset.nb_type = 1;
+ info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
+ if (info.nb_dev) {
+ info.nb_type = 1;
} else {
- amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x1510, NULL);
- if (amd_chipset.nb_dev) {
- amd_chipset.nb_type = 2;
- } else {
- amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x9600, NULL);
- if (amd_chipset.nb_dev)
- amd_chipset.nb_type = 3;
+ info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
+ if (info.nb_dev) {
+ info.nb_type = 2;
+ } else {
+ info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x9600, NULL);
+ if (info.nb_dev)
+ info.nb_type = 3;
}
}
- amd_chipset.probe_result = 1;
+ ret = info.probe_result = 1;
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
- spin_unlock_irqrestore(&amd_lock, flags);
- return amd_chipset.probe_result;
+commit:
+
+ spin_lock_irqsave(&amd_lock, flags);
+ if (amd_chipset.probe_count > 0) {
+ /* race - someone else was faster - drop devices */
+
+ /* Mark that we were here */
+ amd_chipset.probe_count++;
+ ret = amd_chipset.probe_result;
+
+ spin_unlock_irqrestore(&amd_lock, flags);
+
+ if (info.nb_dev)
+ pci_dev_put(info.nb_dev);
+ if (info.smbus_dev)
+ pci_dev_put(info.smbus_dev);
+
+ } else {
+ /* no race - commit the result */
+ info.probe_count++;
+ amd_chipset = info;
+ spin_unlock_irqrestore(&amd_lock, flags);
+ }
+
+ return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
@@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
void usb_amd_dev_put(void)
{
+ struct pci_dev *nb, *smbus;
unsigned long flags;
spin_lock_irqsave(&amd_lock, flags);
@@ -294,20 +322,23 @@ void usb_amd_dev_put(void)
return;
}
- if (amd_chipset.nb_dev) {
- pci_dev_put(amd_chipset.nb_dev);
- amd_chipset.nb_dev = NULL;
- }
- if (amd_chipset.smbus_dev) {
- pci_dev_put(amd_chipset.smbus_dev);
- amd_chipset.smbus_dev = NULL;
- }
+ /* save them to pci_dev_put outside of spinlock */
+ nb = amd_chipset.nb_dev;
+ smbus = amd_chipset.smbus_dev;
+
+ amd_chipset.nb_dev = NULL;
+ amd_chipset.smbus_dev = NULL;
amd_chipset.nb_type = 0;
amd_chipset.sb_type = 0;
amd_chipset.isoc_reqs = 0;
amd_chipset.probe_result = 0;
spin_unlock_irqrestore(&amd_lock, flags);
+
+ if (nb)
+ pci_dev_put(nb);
+ if (smbus)
+ pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);
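The usb_amd_find_chipset_info() rework stops doing PCI lookups while holding amd_lock: probing now fills a stack-local amd_chipset_info, and only the final commit happens under the spinlock; if another caller finished first in the meantime, the local result is discarded and the extra pci_dev references are dropped after unlocking. The shape of that probe-once, commit-under-lock pattern, sketched with an illustrative slow_probe() helper:

/* Sketch: slow work outside the lock, racy commit resolved under it. */
unsigned long flags;
struct amd_chipset_info info = {};
bool lost_race = false;
int ret;

spin_lock_irqsave(&amd_lock, flags);
if (amd_chipset.probe_count > 0) {		/* fast path: already probed */
	amd_chipset.probe_count++;
	ret = amd_chipset.probe_result;
	spin_unlock_irqrestore(&amd_lock, flags);
	return ret;
}
spin_unlock_irqrestore(&amd_lock, flags);

ret = slow_probe(&info);			/* pci_get_device() calls, no lock held */

spin_lock_irqsave(&amd_lock, flags);
if (amd_chipset.probe_count > 0) {
	lost_race = true;			/* keep the earlier result */
	amd_chipset.probe_count++;
	ret = amd_chipset.probe_result;
} else {
	info.probe_count = 1;
	amd_chipset = info;			/* commit ours */
}
spin_unlock_irqrestore(&amd_lock, flags);
/* if lost_race: pci_dev_put() the devices still held in 'info' here */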
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a003e79aacdc..627f3438028c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
- if (port_speed == 0 || port_speed == -1)
+ if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
@@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return 0;
}
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ *
+ */
+static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval;
+
+ interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
+ if (interval != ep->desc.bInterval - 1)
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval to %d microframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval);
+
+ return interval;
+}
+
+/*
+ * Convert bInterval expressed in frames (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int interval;
+
+ interval = fls(8 * ep->desc.bInterval) - 1;
+ interval = clamp_val(interval, 3, 10);
+ if ((1 << interval) != 8 * ep->desc.bInterval)
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval,
+ 8 * ep->desc.bInterval);
+
+ return interval;
+}
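Both helpers above reduce to small integer transformations that can be checked in isolation: the exponent form clamps bInterval into 1..16 and subtracts one, while the frame form computes fls(8 * bInterval) - 1 and clamps it to 3..10, i.e. it rounds a 1-255 frame interval down to a power-of-two number of microframes. A standalone sketch (fls_local() is a userspace stand-in for the kernel's fls(), and the sample bInterval values are made up):

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): position of the highest set bit. */
static int fls_local(unsigned int x)
{
	int r = 0;
	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int bInterval;

	/* 2^(bInterval - 1) form used by HS/SS interrupt and isoc endpoints. */
	for (bInterval = 0; bInterval <= 20; bInterval += 5) {
		unsigned int exp = clamp_uint(bInterval, 1, 16) - 1;
		printf("exponent form: bInterval=%2u -> 2^%u = %u microframes\n",
		       bInterval, exp, 1u << exp);
	}

	/* Frame-based form: round 8*bInterval down to a power of two. */
	for (bInterval = 1; bInterval <= 255; bInterval *= 3) {
		unsigned int exp = clamp_uint(fls_local(8 * bInterval) - 1, 3, 10);
		printf("frame form: bInterval=%3u frames -> 2^%u = %u microframes\n",
		       bInterval, exp, 1u << exp);
	}
	return 0;
}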
+
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
@@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
* is set to 0.
*/
-static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
+static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval = 0;
@@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
- usb_endpoint_xfer_bulk(&ep->desc))
+ usb_endpoint_xfer_bulk(&ep->desc)) {
interval = ep->desc.bInterval;
+ break;
+ }
/* Fall through - SS and HS isoc/int have same decoding */
+
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
- usb_endpoint_xfer_isoc(&ep->desc)) {
- if (ep->desc.bInterval == 0)
- interval = 0;
- else
- interval = ep->desc.bInterval - 1;
- if (interval > 15)
- interval = 15;
- if (interval != ep->desc.bInterval + 1)
- dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
- ep->desc.bEndpointAddress, 1 << interval);
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+ interval = xhci_parse_exponent_interval(udev, ep);
}
break;
- /* Convert bInterval (in 1-255 frames) to microframes and round down to
- * nearest power of 2.
- */
+
case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_int(&ep->desc)) {
+ interval = xhci_parse_exponent_interval(udev, ep);
+ break;
+ }
+ /*
+ * Fall through for isochronous endpoint interval decoding
+ * since it uses the same rules as low speed interrupt
+ * endpoints.
+ */
+
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
- usb_endpoint_xfer_isoc(&ep->desc)) {
- interval = fls(8*ep->desc.bInterval) - 1;
- if (interval > 10)
- interval = 10;
- if (interval < 3)
- interval = 3;
- if ((1 << interval) != 8*ep->desc.bInterval)
- dev_warn(&udev->dev,
- "ep %#x - rounding interval"
- " to %d microframes, "
- "ep desc says %d microframes\n",
- ep->desc.bEndpointAddress,
- 1 << interval,
- 8*ep->desc.bInterval);
+ usb_endpoint_xfer_isoc(&ep->desc)) {
+
+ interval = xhci_parse_frame_interval(udev, ep);
}
break;
+
default:
BUG();
}
@@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
-static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (udev->speed != USB_SPEED_SUPER ||
@@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
return ep->ss_ep_comp.bmAttributes;
}
-static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
+static u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int in;
@@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
-static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint *ep)
{
@@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
* found a similar duplicate.
*/
if (xhci->port_array[i] != major_revision &&
- xhci->port_array[i] != (u8) -1) {
+ xhci->port_array[i] != DUPLICATE_ENTRY) {
if (xhci->port_array[i] == 0x03)
xhci->num_usb3_ports--;
else
xhci->num_usb2_ports--;
- xhci->port_array[i] = (u8) -1;
+ xhci->port_array[i] = DUPLICATE_ENTRY;
}
/* FIXME: Should we disable the port? */
continue;
@@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
for (i = 0; i < num_ports; i++) {
if (xhci->port_array[i] == 0x03 ||
xhci->port_array[i] == 0 ||
- xhci->port_array[i] == -1)
+ xhci->port_array[i] == DUPLICATE_ENTRY)
continue;
xhci->usb2_ports[port_index] =
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ceea9f33491c..a10494c2f3c7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
+ /* AMD PLL quirk */
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ xhci->quirks |= XHCI_AMD_PLL_FIX;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cfc1ad92473f..7437386a9a50 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
/* Does this link TRB point to the first segment in a ring,
* or was the previous TRB the last TRB on the last segment in the ERST?
*/
-static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
@@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring
* segment? I.e. would the updated event TRB pointer step off the end of the
* event seg?
*/
-static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *seg, union xhci_trb *trb)
{
if (ring == xhci->event_ring)
@@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}
-static inline int enqueue_is_link_trb(struct xhci_ring *ring)
+static int enqueue_is_link_trb(struct xhci_ring *ring)
{
struct xhci_link_trb *link = &ring->enqueue->link;
return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
@@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
ep->ep_state |= SET_DEQ_PENDING;
}
-static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
+static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
ep->ep_state &= ~EP_HALT_PENDING;
@@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
/* Only giveback urb when this is the last td in urb */
if (urb_priv->td_cnt == urb_priv->length) {
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
+ }
+ }
usb_hcd_unlink_urb_from_ep(hcd, urb);
xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
@@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
* Skip ports that don't have known speeds, or have duplicate
* Extended Capabilities port speed entries.
*/
- if (port_speed == 0 || port_speed == -1)
+ if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
continue;
/*
@@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
u8 major_revision;
struct xhci_bus_state *bus_state;
u32 __iomem **port_array;
+ bool bogus_port_status = false;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
@@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Invalid port id %d\n", port_id);
+ bogus_port_status = true;
goto cleanup;
}
@@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_warn(xhci, "Event for port %u not in "
"Extended Capabilities, ignoring.\n",
port_id);
+ bogus_port_status = true;
goto cleanup;
}
- if (major_revision == (u8) -1) {
+ if (major_revision == DUPLICATE_ENTRY) {
xhci_warn(xhci, "Event for port %u duplicated in"
"Extended Capabilities, ignoring.\n",
port_id);
+ bogus_port_status = true;
goto cleanup;
}
@@ -1335,6 +1346,13 @@ cleanup:
/* Update event ring dequeue pointer before dropping the lock */
inc_deq(xhci, xhci->event_ring, true);
+ /* Don't make the USB core poll the roothub if we got a bad port status
+ * change event. Besides, at that point we can't tell which roothub
+ * (USB 2.0 or USB 3.0) to kick.
+ */
+ if (bogus_port_status)
+ return;
+
spin_unlock(&xhci->lock);
/* Pass this up to the core */
usb_hcd_poll_rh_status(hcd);
@@ -1554,8 +1572,17 @@ td_cleanup:
urb_priv->td_cnt++;
/* Giveback the urb when all the tds are completed */
- if (urb_priv->td_cnt == urb_priv->length)
+ if (urb_priv->td_cnt == urb_priv->length) {
ret = 1;
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
+ == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_enable();
+ }
+ }
+ }
}
return ret;
@@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
struct urb_priv *urb_priv;
int idx;
int len = 0;
- int skip_td = 0;
union xhci_trb *cur_trb;
struct xhci_segment *cur_seg;
+ struct usb_iso_packet_descriptor *frame;
u32 trb_comp_code;
+ bool skip_td = false;
ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
trb_comp_code = GET_COMP_CODE(event->transfer_len);
urb_priv = td->urb->hcpriv;
idx = urb_priv->td_cnt;
+ frame = &td->urb->iso_frame_desc[idx];
- if (ep->skip) {
- /* The transfer is partly done */
- *status = -EXDEV;
- td->urb->iso_frame_desc[idx].status = -EXDEV;
- } else {
- /* handle completion code */
- switch (trb_comp_code) {
- case COMP_SUCCESS:
- td->urb->iso_frame_desc[idx].status = 0;
- xhci_dbg(xhci, "Successful isoc transfer!\n");
- break;
- case COMP_SHORT_TX:
- if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
- td->urb->iso_frame_desc[idx].status =
- -EREMOTEIO;
- else
- td->urb->iso_frame_desc[idx].status = 0;
- break;
- case COMP_BW_OVER:
- td->urb->iso_frame_desc[idx].status = -ECOMM;
- skip_td = 1;
- break;
- case COMP_BUFF_OVER:
- case COMP_BABBLE:
- td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
- skip_td = 1;
- break;
- case COMP_STALL:
- td->urb->iso_frame_desc[idx].status = -EPROTO;
- skip_td = 1;
- break;
- case COMP_STOP:
- case COMP_STOP_INVAL:
- break;
- default:
- td->urb->iso_frame_desc[idx].status = -1;
- break;
- }
- }
-
- /* calc actual length */
- if (ep->skip) {
- td->urb->iso_frame_desc[idx].actual_length = 0;
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
- return finish_td(xhci, td, event_trb, event, ep, status, true);
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ frame->status = 0;
+ xhci_dbg(xhci, "Successful isoc transfer!\n");
+ break;
+ case COMP_SHORT_TX:
+ frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
+ -EREMOTEIO : 0;
+ break;
+ case COMP_BW_OVER:
+ frame->status = -ECOMM;
+ skip_td = true;
+ break;
+ case COMP_BUFF_OVER:
+ case COMP_BABBLE:
+ frame->status = -EOVERFLOW;
+ skip_td = true;
+ break;
+ case COMP_STALL:
+ frame->status = -EPROTO;
+ skip_td = true;
+ break;
+ case COMP_STOP:
+ case COMP_STOP_INVAL:
+ break;
+ default:
+ frame->status = -1;
+ break;
}
- if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
- td->urb->iso_frame_desc[idx].actual_length =
- td->urb->iso_frame_desc[idx].length;
- td->urb->actual_length +=
- td->urb->iso_frame_desc[idx].length;
+ if (trb_comp_code == COMP_SUCCESS || skip_td) {
+ frame->actual_length = frame->length;
+ td->urb->actual_length += frame->length;
} else {
for (cur_trb = ep_ring->dequeue,
cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
TRB_LEN(event->transfer_len);
if (trb_comp_code != COMP_STOP_INVAL) {
- td->urb->iso_frame_desc[idx].actual_length = len;
+ frame->actual_length = len;
td->urb->actual_length += len;
}
}
@@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
return finish_td(xhci, td, event_trb, event, ep, status, false);
}
+static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_transfer_event *event,
+ struct xhci_virt_ep *ep, int *status)
+{
+ struct xhci_ring *ep_ring;
+ struct urb_priv *urb_priv;
+ struct usb_iso_packet_descriptor *frame;
+ int idx;
+
+ ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
+ urb_priv = td->urb->hcpriv;
+ idx = urb_priv->td_cnt;
+ frame = &td->urb->iso_frame_desc[idx];
+
+ /* The transfer is partly done */
+ *status = -EXDEV;
+ frame->status = -EXDEV;
+
+ /* calc actual length */
+ frame->actual_length = 0;
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring, false);
+
+ return finish_td(xhci, td, NULL, event, ep, status, true);
+}
+
/*
* Process bulk and interrupt tds, update urb status and actual_length.
*/
@@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
+
/* Is this a TRB in the currently executing TD? */
event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
td->last_trb, event_dma);
- if (event_seg && ep->skip) {
+ if (!event_seg) {
+ if (!ep->skip ||
+ !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
+ /* HC is busted, give up! */
+ xhci_err(xhci,
+ "ERROR Transfer event TRB DMA ptr not "
+ "part of current TD\n");
+ return -ESHUTDOWN;
+ }
+
+ ret = skip_isoc_td(xhci, td, event, ep, &status);
+ goto cleanup;
+ }
+
+ if (ep->skip) {
xhci_dbg(xhci, "Found td. Clear skip flag.\n");
ep->skip = false;
}
- if (!event_seg &&
- (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
- /* HC is busted, give up! */
- xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
- "part of current TD\n");
- return -ESHUTDOWN;
- }
- if (event_seg) {
- event_trb = &event_seg->trbs[(event_dma -
- event_seg->dma) / sizeof(*event_trb)];
- /*
- * No-op TRB should not trigger interrupts.
- * If event_trb is a no-op TRB, it means the
- * corresponding TD has been cancelled. Just ignore
- * the TD.
- */
- if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
- == TRB_TYPE(TRB_TR_NOOP)) {
- xhci_dbg(xhci, "event_trb is a no-op TRB. "
- "Skip it\n");
- goto cleanup;
- }
+ event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
+ sizeof(*event_trb)];
+ /*
+ * No-op TRB should not trigger interrupts.
+ * If event_trb is a no-op TRB, it means the
+ * corresponding TD has been cancelled. Just ignore
+ * the TD.
+ */
+ if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
+ == TRB_TYPE(TRB_TR_NOOP)) {
+ xhci_dbg(xhci,
+ "event_trb is a no-op TRB. Skip it\n");
+ goto cleanup;
}
/* Now update the urb's actual_length and give back to
@@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
+ if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_quirk_pll_disable();
+ }
+ xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
+
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
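Taken together, the bandwidth_isoc_reqs hunks in this file amount to a simple counter: queuing the first isochronous URB switches the quirk path off via usb_amd_quirk_pll_disable(), and when the last in-flight isoc URB gives back, usb_amd_quirk_pll_enable() switches it back on. A hedged standalone sketch of that bracketing (the quirk_pll_* helpers below are just printing stand-ins):

#include <stdio.h>

static int bandwidth_isoc_reqs;     /* in-flight isochronous URBs */
static int amd_pll_fix = 1;         /* quirk applies on this host */

static void quirk_pll_disable(void) { puts("PLL quirk: disable"); } /* stand-in */
static void quirk_pll_enable(void)  { puts("PLL quirk: enable"); }  /* stand-in */

static void queue_isoc_urb(void)
{
	/* first isoc URB going on the wire: apply the workaround */
	if (bandwidth_isoc_reqs == 0 && amd_pll_fix)
		quirk_pll_disable();
	bandwidth_isoc_reqs++;
}

static void complete_isoc_urb(void)
{
	/* last isoc URB finished: undo the workaround */
	bandwidth_isoc_reqs--;
	if (bandwidth_isoc_reqs == 0 && amd_pll_fix)
		quirk_pll_enable();
}

int main(void)
{
	queue_isoc_urb();
	queue_isoc_urb();
	complete_isoc_urb();
	complete_isoc_urb();
	return 0;
}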
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 196e0181b2ed..81b976e45880 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd)
del_timer_sync(&xhci->event_ring_timer);
#endif
+ if (xhci->quirks & XHCI_AMD_PLL_FIX)
+ usb_amd_dev_put();
+
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* If restore operation fails, re-initialize the HC during resume */
if ((temp & STS_SRE) || hibernated) {
- usb_root_hub_lost_power(hcd->self.root_hub);
+ /* Let the USB core know _both_ roothubs lost power. */
+ usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
+ usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
xhci_dbg(xhci, "Stop HCD\n");
xhci_halt(xhci);
@@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
/* Everything but endpoint 0 is disabled, so free or cache the rings. */
last_freed_endpoint = 1;
for (i = 1; i < 31; ++i) {
- if (!virt_dev->eps[i].ring)
- continue;
- xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
- last_freed_endpoint = i;
+ struct xhci_virt_ep *ep = &virt_dev->eps[i];
+
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ xhci_free_stream_info(xhci, ep->stream_info);
+ ep->stream_info = NULL;
+ ep->ep_state &= ~EP_HAS_STREAMS;
+ }
+
+ if (ep->ring) {
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ last_freed_endpoint = i;
+ }
}
xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 07e263063e37..ba1be6b7cc6d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -30,6 +30,7 @@
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
+#include "pci-quirks.h"
/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)
@@ -232,7 +233,7 @@ struct xhci_op_regs {
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
-#define ENABLE_DEV_NOTE(x) (1 << x)
+#define ENABLE_DEV_NOTE(x) (1 << (x))
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
@@ -348,6 +349,9 @@ struct xhci_op_regs {
/* Initiate a warm port reset - complete when PORT_WRC is '1' */
#define PORT_WR (1 << 31)
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
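DUPLICATE_ENTRY only gives a name to the open-coded (u8) -1 comparisons being replaced in xhci-mem.c and xhci-ring.c; storing -1 into a u8 slot yields 0xff, so comparing against ((u8)(-1)) picks out exactly the slots that were marked as duplicates. A trivial standalone check:

#include <stdio.h>

#define DUPLICATE_ENTRY ((unsigned char)(-1))

int main(void)
{
	unsigned char port_array[4] = { 0x02, 0x03, 0, 0 };

	port_array[3] = -1;     /* marked as a duplicate entry */

	printf("sentinel value: 0x%02x\n", DUPLICATE_ENTRY);     /* 0xff */
	printf("slot 3 is duplicate: %d\n",
	       port_array[3] == DUPLICATE_ENTRY);                /* 1 */
	return 0;
}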
/* Port Power Management Status and Control - port_power_base bitmasks */
/* Inactivity timer value for transitions into U1, in microseconds.
* Timeout can be up to 127us. 0xFF means an infinite timeout.
@@ -601,11 +605,11 @@ struct xhci_ep_ctx {
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
/* Mult - Max number of bursts within an interval, in EP companion desc. */
-#define EP_MULT(p) ((p & 0x3) << 8)
+#define EP_MULT(p) (((p) & 0x3) << 8)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
-#define EP_INTERVAL(p) ((p & 0xff) << 16)
+#define EP_INTERVAL(p) (((p) & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
#define EP_MAXPSTREAMS_MASK (0x1f << 10)
#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
@@ -1276,6 +1280,7 @@ struct xhci_hcd {
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
#define XHCI_NEC_HOST (1 << 2)
+#define XHCI_AMD_PLL_FIX (1 << 3)
/* There are two roothubs to keep track of bus suspend info for */
struct xhci_bus_state bus_state[2];
/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 4cbb7e4b368d..74073b363c30 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -14,7 +14,7 @@ config USB_MUSB_HDRC
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
- tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+ bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -30,8 +30,8 @@ config USB_MUSB_HDRC
If you do not know what this is, please say N.
- To compile this driver as a module, choose M here; the
- module will be called "musb-hdrc".
+# To compile this driver as a module, choose M here; the
+# module will be called "musb-hdrc".
choice
prompt "Platform Glue Layer"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 52312e8af213..8e2a1ff8a35a 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -21,6 +21,7 @@
#include <asm/cacheflush.h>
#include "musb_core.h"
+#include "musbhsdma.h"
#include "blackfin.h"
struct bfin_glue {
@@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
return -EIO;
}
+static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
+ u16 packet_sz, u8 *mode,
+ dma_addr_t *dma_addr, u32 *len)
+{
+ struct musb_dma_channel *musb_channel = channel->private_data;
+
+ /*
+ * Anomaly 05000450 might cause data corruption when using DMA
+ * MODE 1 transmits that end with a short packet. To work around this,
+ * we truncate all MODE 1 transfers down to a multiple of the
+ * max packet size, and then do the last short packet transfer
+ * (if there is any) using MODE 0.
+ */
+ if (ANOMALY_05000450) {
+ if (musb_channel->transmit && *mode == 1)
+ *len = *len - (*len % packet_sz);
+ }
+
+ return 0;
+}
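The workaround itself is plain integer arithmetic: the MODE 1 DMA length is truncated to a whole number of max-size packets, and whatever short tail remains is sent later with MODE 0. A standalone sketch of that truncation (the packet size and transfer length below are made-up example values):

#include <stdio.h>

/* Truncate a MODE 1 TX transfer to a multiple of the max packet size. */
static unsigned int truncate_to_packets(unsigned int len, unsigned short packet_sz)
{
	return len - (len % packet_sz);
}

int main(void)
{
	unsigned short packet_sz = 512;
	unsigned int len = 3000;     /* 5 full packets + a 440-byte short packet */
	unsigned int dma_len = truncate_to_packets(len, packet_sz);

	printf("DMA MODE 1 moves %u bytes, remaining %u bytes go out in MODE 0\n",
	       dma_len, len - dma_len);
	return 0;
}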
+
static void bfin_musb_reg_init(struct musb *musb)
{
if (ANOMALY_05000346) {
@@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = {
.vbus_status = bfin_musb_vbus_status,
.set_vbus = bfin_musb_set_vbus,
+
+ .adjust_channel_params = bfin_musb_adjust_channel_params,
};
static u64 bfin_dmamask = DMA_BIT_MASK(32);
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index de55a3c3259a..ab434fbd8c35 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
length = min(n_bds * maxpacket, length);
}
- DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+ DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
tx->index,
maxpacket,
rndis ? "rndis" : "transparent",
n_bds,
- addr, length);
+ (unsigned long long)addr, length);
cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
@@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
length = min(n_bds * maxpacket, length);
DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
- "dma 0x%x len %u %u/%u\n",
+ "dma 0x%llx len %u %u/%u\n",
rx->index, maxpacket,
onepacket
? (is_rndis ? "rndis" : "onepacket")
@@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
musb_readl(tibase,
DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
& 0xffff,
- addr, length, rx->channel.actual_len, rx->buf_len);
+ (unsigned long long)addr, length,
+ rx->channel.actual_len, rx->buf_len);
/* only queue one segment at a time, since the hardware prevents
* correct queue shutdown after unexpected short packets
@@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
if (!completed && (bd->hw_options & CPPI_OWN_SET))
break;
- DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+ DBG(5, "C/RXBD %llx: nxt %08x buf %08x "
"off.len %08x opt.len %08x (%d)\n",
- bd->dma, bd->hw_next, bd->hw_bufp,
+ (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
bd->hw_off_len, bd->hw_options,
rx->channel.actual_len);
@@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
musb_ep_select(cppi->mregs, rx->index + 1);
csr = musb_readw(regs, MUSB_RXCSR);
if (csr & MUSB_RXCSR_DMAENAB) {
- DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+ DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n",
rx->index,
rx->head, rx->tail,
rx->last_processed
- ? rx->last_processed->dma
+ ? (unsigned long long)
+ rx->last_processed->dma
: 0,
completed ? ", completed" : "",
csr);
@@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
- if (!tx && !rx)
+ if (!tx && !rx) {
+ if (cppi->irq)
+ spin_unlock_irqrestore(&musb->lock, flags);
return IRQ_NONE;
+ }
DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
@@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
*/
if (NULL == bd) {
DBG(1, "null BD\n");
- tx_ram->tx_complete = 0;
+ musb_writel(&tx_ram->tx_complete, 0, 0);
continue;
}
@@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
* compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
- cppi_ch->head = 0;
+ cppi_ch->head = NULL;
musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 630ae7f3cd4c..f10ff00ca09e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev)
struct musb *musb = dev_to_musb(&pdev->dev);
unsigned long flags;
+ pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, flags);
musb_platform_disable(musb);
musb_generic_disable(musb);
@@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev)
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_platform_exit(musb);
+ pm_runtime_put(musb->controller);
/* FIXME power down */
}
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 4bd9e2145ee4..0e053b587960 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -261,6 +261,7 @@ enum musb_g_ep0_state {
* @try_idle: tries to idle the IP
* @vbus_status: returns vbus status if possible
* @set_vbus: forces vbus status
+ * @adjust_channel_params: pre-check for the standard dma channel_program func
*/
struct musb_platform_ops {
int (*init)(struct musb *musb);
@@ -274,6 +275,10 @@ struct musb_platform_ops {
int (*vbus_status)(struct musb *musb);
void (*set_vbus)(struct musb *musb, int on);
+
+ int (*adjust_channel_params)(struct dma_channel *channel,
+ u16 packet_sz, u8 *mode,
+ dma_addr_t *dma_addr, u32 *len);
};
/*
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 98519c5d8b5c..6dfbf9ffd7a6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
- MUSB_TXCSR_TXPKTRDY);
+ MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
musb_writew(epio, MUSB_TXCSR, csr);
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
@@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
}
/* if the hardware doesn't have the request, easy ... */
- if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+ if (musb_ep->req_list.next != &req->list || musb_ep->busy)
musb_g_giveback(musb_ep, request, -ECONNRESET);
/* ... else abort the dma transfer ... */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 0144a2d481fd..d281792db05c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel,
BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
channel->status == MUSB_DMA_STATUS_BUSY);
+ /* Let targets check/tweak the arguments */
+ if (musb->ops->adjust_channel_params) {
+ int ret = musb->ops->adjust_channel_params(channel,
+ packet_sz, &mode, &dma_addr, &len);
+ if (ret)
+ return ret;
+ }
+
/*
* The DMA engine in RTL1.8 and above cannot handle
* DMA addresses that are not aligned to a 4 byte boundary.
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 25cb8b0003b1..57a27fa954b4 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb,
case USB_EVENT_VBUS:
DBG(4, "VBUS Connect\n");
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (musb->gadget_driver)
pm_runtime_get_sync(musb->controller);
-
+#endif
otg_init(musb->xceiv);
break;
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index d6384e4aeef9..f7e04bf34a13 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev)
}
musb->dev.parent = &pdev->dev;
+ musb->dev.dma_mask = pdev->dev.dma_mask;
+ musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
glue->dev = &pdev->dev;
glue->musb = musb;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a973c7a29d6e..4de6ef0ae52a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
* /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
*/
static struct usb_device_id id_table_combined [] = {
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
@@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
{ USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
{ USB_DEVICE(OCT_VID, OCT_US101_PID) },
+ { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
.driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
@@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
+ { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
{ USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c543e55bafba..efffc23723bd 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -300,6 +300,8 @@
* Hameg HO820 and HO870 interface (using VID 0x0403)
*/
#define HAMEG_HO820_PID 0xed74
+#define HAMEG_HO730_PID 0xed73
+#define HAMEG_HO720_PID 0xed72
#define HAMEG_HO870_PID 0xed71
/*
@@ -572,6 +574,7 @@
/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
+#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */
#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
/*
@@ -1141,3 +1144,12 @@
#define QIHARDWARE_VID 0x20B7
#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
+/*
+ * CTI GmbH RS485 Converter http://www.cti-lean.com/
+ */
+/* USB-485-Mini */
+#define FTDI_CTI_MINI_PID 0xF608
+/* USB-Nano-485 */
+#define FTDI_CTI_NANO_PID 0xF60B
+
+
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 75c7f456eed5..d77ff0435896 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb);
/* ONDA MT825UP HSDPA 14.2 modem */
#define ONDA_MT825UP 0x000b
+/* Samsung products */
+#define SAMSUNG_VENDOR_ID 0x04e8
+#define SAMSUNG_PRODUCT_GT_B3730 0x6889
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 8858201eb1d3..54a9dab1f33b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
ifnum = intf->desc.bInterfaceNumber;
dbg("This Interface = %d", ifnum);
- data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private),
+ data = kzalloc(sizeof(struct usb_wwan_intf_private),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
dbg("QDL port found");
- if (serial->interface->num_altsetting == 1)
- return 0;
+ if (serial->interface->num_altsetting == 1) {
+ retval = 0; /* Success */
+ break;
+ }
retval = usb_set_interface(serial->dev, ifnum, 1);
if (retval < 0) {
@@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
retval = -ENODEV;
kfree(data);
}
- return retval;
}
break;
@@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
+ kfree(data);
}
} else if (ifnum == 2) {
dbg("Modem port found");
@@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
retval = -ENODEV;
kfree(data);
}
- return retval;
} else if (ifnum==3) {
/*
* NMEA (serial line 9600 8N1)
@@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
"Could not set interface, error %d\n",
retval);
retval = -ENODEV;
+ kfree(data);
}
}
break;
@@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
dev_err(&serial->dev->dev,
"unknown number of interfaces: %d\n", nintf);
kfree(data);
- return -ENODEV;
+ retval = -ENODEV;
}
+ /* Set serial->private if not returning -ENODEV */
+ if (retval != -ENODEV)
+ usb_set_serial_data(serial, data);
return retval;
}
+static void qc_release(struct usb_serial *serial)
+{
+ struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+ dbg("%s", __func__);
+
+ /* Call usb_wwan release & free the private data allocated in qcprobe */
+ usb_wwan_release(serial);
+ usb_set_serial_data(serial, NULL);
+ kfree(priv);
+}
+
static struct usb_serial_driver qcdevice = {
.driver = {
.owner = THIS_MODULE,
@@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = {
.chars_in_buffer = usb_wwan_chars_in_buffer,
.attach = usb_wwan_startup,
.disconnect = usb_wwan_disconnect,
- .release = usb_wwan_release,
+ .release = qc_release,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
.resume = usb_wwan_resume,
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index a2e5b5100ab4..0f4e8c942f9e 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
switch (val) {
case CPUFREQ_PRECHANGE:
- if (!fbi->overlay[0].usage && !fbi->overlay[1].usage)
+#ifdef CONFIG_FB_PXA_OVERLAY
+ if (!(fbi->overlay[0].usage || fbi->overlay[1].usage))
+#endif
set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
break;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 4fb5b2bf2348..4bcc8b82640b 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -590,15 +590,10 @@ static struct virtio_config_ops virtio_pci_config_ops = {
static void virtio_pci_release_dev(struct device *_d)
{
- struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
+ struct virtio_device *dev = container_of(_d, struct virtio_device,
+ dev);
struct virtio_pci_device *vp_dev = to_vp_device(dev);
- struct pci_dev *pci_dev = vp_dev->pci_dev;
- vp_del_vqs(dev);
- pci_set_drvdata(pci_dev, NULL);
- pci_iounmap(pci_dev, vp_dev->ioaddr);
- pci_release_regions(pci_dev);
- pci_disable_device(pci_dev);
kfree(vp_dev);
}
@@ -681,6 +676,12 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
unregister_virtio_device(&vp_dev->vdev);
+
+ vp_del_vqs(&vp_dev->vdev);
+ pci_set_drvdata(pci_dev, NULL);
+ pci_iounmap(pci_dev, vp_dev->ioaddr);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
}
#ifdef CONFIG_PM
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cc2f73e03475..b0043fb26a4d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
/* detach_buf clears data, so grab it now. */
buf = vq->data[i];
detach_buf(vq, i);
+ vq->vring.avail->idx--;
END_USE(vq);
return buf;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 42d6c930cc87..33167b43ac7e 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -912,8 +912,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
unsigned long irqflags,
const char *devname, void *dev_id)
{
- unsigned int irq;
- int retval;
+ int irq, retval;
irq = bind_evtchn_to_irq(evtchn);
if (irq < 0)
@@ -955,8 +954,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
- unsigned int irq;
- int retval;
+ int irq, retval;
irq = bind_virq_to_irq(virq, cpu);
if (irq < 0)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 95143dd6904d..a2eee574784e 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -8,6 +8,7 @@
#include <linux/sysrq.h>
#include <linux/stop_machine.h>
#include <linux/freezer.h>
+#include <linux/syscore_ops.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -61,7 +62,7 @@ static void xen_post_suspend(int cancelled)
xen_mm_unpin_all();
}
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
static int xen_suspend(void *data)
{
struct suspend_info *si = data;
@@ -70,8 +71,13 @@ static int xen_suspend(void *data)
BUG_ON(!irqs_disabled());
err = sysdev_suspend(PMSG_FREEZE);
+ if (!err) {
+ err = syscore_suspend();
+ if (err)
+ sysdev_resume();
+ }
if (err) {
- printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
+ printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n",
err);
return err;
}
@@ -95,6 +101,7 @@ static int xen_suspend(void *data)
xen_timer_resume();
}
+ syscore_resume();
sysdev_resume();
return 0;
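The error handling added here is the usual two-phase suspend with rollback: the second step (syscore_suspend) only runs if the first (sysdev_suspend) succeeded, the first is undone if the second fails, and resume runs in reverse order. A minimal sketch of that structure with hypothetical step_a/step_b helpers, where step_b is made to fail so the rollback path is exercised:

#include <stdio.h>

static int step_a_suspend(void) { puts("A suspend"); return 0; }   /* hypothetical */
static void step_a_resume(void) { puts("A resume"); }
static int step_b_suspend(void) { puts("B suspend"); return -1; }  /* simulate failure */
static void step_b_resume(void) { puts("B resume"); }

static int do_suspend(void)
{
	int err;

	err = step_a_suspend();
	if (!err) {
		err = step_b_suspend();
		if (err)
			step_a_resume();   /* roll back the first step */
	}
	if (err) {
		fprintf(stderr, "suspend failed: %d\n", err);
		return err;
	}

	/* ... the actual suspended work would happen here ... */

	step_b_resume();
	step_a_resume();
	return 0;
}

int main(void)
{
	return do_suspend() ? 1 : 0;
}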
@@ -173,7 +180,7 @@ out:
#endif
shutting_down = SHUTDOWN_INVALID;
}
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
struct shutdown_handler {
const char *command;
@@ -202,7 +209,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
{ "poweroff", do_poweroff },
{ "halt", do_poweroff },
{ "reboot", do_reboot },
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_HIBERNATE_CALLBACKS
{ "suspend", do_suspend },
#endif
{NULL, NULL},
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 0ee594569dcc..85b67ffa2a43 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
- int err, flags;
+ int err;
struct p9_fid *fid;
- struct v9fs_session_info *v9ses;
- v9ses = v9fs_dentry2v9ses(dentry);
fid = v9fs_fid_clone_with_uid(dentry, 0);
if (IS_ERR(fid))
goto error_out;
@@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
* dirty pages. We always request for the open fid in read-write
* mode so that a partial page write which result in page
* read can work.
- *
- * we don't have a tsyncfs operation for older version
- * of protocol. So make sure the write back fid is
- * opened in O_SYNC mode.
*/
- if (!v9fs_proto_dotl(v9ses))
- flags = O_RDWR | O_SYNC;
- else
- flags = O_RDWR;
-
- err = p9_client_open(fid, flags);
+ err = p9_client_open(fid, O_RDWR);
if (err < 0) {
p9_client_clunk(fid);
fid = ERR_PTR(err);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 9665c2b840e6..e5ebedfc5ed8 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,7 +116,6 @@ struct v9fs_session_info {
struct list_head slist; /* list of sessions registered with v9fs */
struct backing_dev_info bdi;
struct rw_semaphore rename_sem;
- struct p9_fid *root_fid; /* Used for file system sync */
};
/* cache_validity flags */
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index b6a3b9f7fe4d..e022890c6f40 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
retval = v9fs_refresh_inode_dotl(fid, inode);
else
retval = v9fs_refresh_inode(fid, inode);
- if (retval <= 0)
+ if (retval == -ENOENT)
+ return 0;
+ if (retval < 0)
return retval;
}
out_valid:
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index ffbb113d5f33..82a7c38ddad0 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
fid = v9fs_fid_lookup(dentry);
if (IS_ERR(fid)) {
__putname(link);
- link = ERR_PTR(PTR_ERR(fid));
+ link = ERR_CAST(fid);
goto ndset;
}
retval = p9_client_readlink(fid, &target);
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f3eed3383e4f..feef6cdc1fd2 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
retval = PTR_ERR(inode);
goto release_sb;
}
+
root = d_alloc_root(inode);
if (!root) {
iput(inode);
@@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
p9stat_free(st);
kfree(st);
}
- v9fs_fid_add(root, fid);
retval = v9fs_get_acl(inode, fid);
if (retval)
goto release_sb;
- /*
- * Add the root fid to session info. This is used
- * for file system sync. We want a cloned fid here
- * so that we can do a sync_filesystem after a
- * shrink_dcache_for_umount
- */
- v9ses->root_fid = v9fs_fid_clone(root);
- if (IS_ERR(v9ses->root_fid)) {
- retval = PTR_ERR(v9ses->root_fid);
- goto release_sb;
- }
+ v9fs_fid_add(root, fid);
P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
return dget(sb->s_root);
@@ -210,11 +200,15 @@ close_session:
v9fs_session_close(v9ses);
kfree(v9ses);
return ERR_PTR(retval);
+
release_sb:
/*
- * we will do the session_close and root dentry
- * release in the below call.
+ * we will do the session_close and root dentry release
+ * in the call below. But we need to clunk the fid here, because we
+ * haven't attached it to the dentry, so it won't get clunked
+ * automatically.
*/
+ p9_client_clunk(fid);
deactivate_locked_super(sb);
return ERR_PTR(retval);
}
@@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s)
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
kill_anon_super(s);
- p9_client_clunk(v9ses->root_fid);
+
v9fs_session_cancel(v9ses);
v9fs_session_close(v9ses);
kfree(v9ses);
@@ -285,14 +279,6 @@ done:
return res;
}
-static int v9fs_sync_fs(struct super_block *sb, int wait)
-{
- struct v9fs_session_info *v9ses = sb->s_fs_info;
-
- P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
- return p9_client_sync_fs(v9ses->root_fid);
-}
-
static int v9fs_drop_inode(struct inode *inode)
{
struct v9fs_session_info *v9ses;
@@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode)
return 1;
}
+static int v9fs_write_inode(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ int ret;
+ struct p9_wstat wstat;
+ struct v9fs_inode *v9inode;
+ /*
+ * send an fsync request to server irrespective of
+ * wbc->sync_mode.
+ */
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ v9inode = V9FS_I(inode);
+ if (!v9inode->writeback_fid)
+ return 0;
+ v9fs_blank_wstat(&wstat);
+
+ ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
+ if (ret < 0) {
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
+ }
+ return 0;
+}
+
+static int v9fs_write_inode_dotl(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ int ret;
+ struct v9fs_inode *v9inode;
+ /*
+ * send an fsync request to server irrespective of
+ * wbc->sync_mode.
+ */
+ P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ v9inode = V9FS_I(inode);
+ if (!v9inode->writeback_fid)
+ return 0;
+ ret = p9_client_fsync(v9inode->writeback_fid, 0);
+ if (ret < 0) {
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
+ }
+ return 0;
+}
+
static const struct super_operations v9fs_super_ops = {
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
@@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = {
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
+ .write_inode = v9fs_write_inode,
};
static const struct super_operations v9fs_super_ops_dotl = {
.alloc_inode = v9fs_alloc_inode,
.destroy_inode = v9fs_destroy_inode,
- .sync_fs = v9fs_sync_fs,
.statfs = v9fs_statfs,
.drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = generic_show_options,
.umount_begin = v9fs_umount_begin,
+ .write_inode = v9fs_write_inode_dotl,
};
struct file_system_type v9fs_fs_type = {
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f34078d702d3..303983fabfd6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -941,9 +941,13 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->start_stack = bprm->p;
#ifdef arch_randomize_brk
- if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
+ if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
current->mm->brk = current->mm->start_brk =
arch_randomize_brk(current->mm);
+#ifdef CONFIG_COMPAT_BRK
+ current->brk_randomized = 1;
+#endif
+ }
#endif
if (current->personality & MMAP_PAGE_ZERO) {
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index de34bfad9ec3..5d505aaa72fb 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
if (value) {
acl = posix_acl_from_xattr(value, size);
- if (acl == NULL) {
- value = NULL;
- size = 0;
+ if (acl) {
+ ret = posix_acl_valid(acl);
+ if (ret)
+ goto out;
} else if (IS_ERR(acl)) {
return PTR_ERR(acl);
}
}
ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
-
+out:
posix_acl_release(acl);
return ret;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3458b5725540..2e61fe1b6b8c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -740,8 +740,10 @@ struct btrfs_space_info {
*/
unsigned long reservation_progress;
- int full; /* indicates that we cannot allocate any more
+ int full:1; /* indicates that we cannot allocate any more
chunks for this space */
+ int chunk_alloc:1; /* set if we are allocating a chunk */
+
int force_alloc; /* set if we need to force a chunk alloc for
this space */
@@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
+void btrfs_drop_pages(struct page **pages, size_t num_pages);
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached);
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8f1d44ba332f..68c84c8c24bd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3057,7 +3057,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
btrfs_destroy_pinned_extent(root,
root->fs_info->pinned_extents);
- t->use_count = 0;
+ atomic_set(&t->use_count, 0);
list_del_init(&t->list);
memset(t, 0, sizeof(*t));
kmem_cache_free(btrfs_transaction_cachep, t);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f619c3cb13b7..31f33ba56fe8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,6 +33,25 @@
#include "locking.h"
#include "free-space-cache.h"
+/* control flags for do_chunk_alloc's force field
+ * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
+ * if we really need one.
+ *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_LIMITED means to only try and allocate one
+ * if we have very few chunks already allocated. This is
+ * used as part of the clustering code to help make sure
+ * we have a good pool of storage to cluster in, without
+ * filling the FS with empty chunks
+ *
+ */
+enum {
+ CHUNK_ALLOC_NO_FORCE = 0,
+ CHUNK_ALLOC_FORCE = 1,
+ CHUNK_ALLOC_LIMITED = 2,
+};
+
static int update_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc);
@@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->bytes_readonly = 0;
found->bytes_may_use = 0;
found->full = 0;
- found->force_alloc = 0;
+ found->force_alloc = CHUNK_ALLOC_NO_FORCE;
+ found->chunk_alloc = 0;
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
atomic_set(&found->caching_threads, 0);
@@ -3150,7 +3170,7 @@ again:
if (!data_sinfo->full && alloc_chunk) {
u64 alloc_target;
- data_sinfo->force_alloc = 1;
+ data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
spin_unlock(&data_sinfo->lock);
alloc:
alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3160,7 +3180,8 @@ alloc:
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
bytes + 2 * 1024 * 1024,
- alloc_target, 0);
+ alloc_target,
+ CHUNK_ALLOC_NO_FORCE);
btrfs_end_transaction(trans, root);
if (ret < 0) {
if (ret != -ENOSPC)
@@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
- found->force_alloc = 1;
+ found->force_alloc = CHUNK_ALLOC_FORCE;
}
rcu_read_unlock();
}
static int should_alloc_chunk(struct btrfs_root *root,
- struct btrfs_space_info *sinfo, u64 alloc_bytes)
+ struct btrfs_space_info *sinfo, u64 alloc_bytes,
+ int force)
{
u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
+ u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
u64 thresh;
- if (sinfo->bytes_used + sinfo->bytes_reserved +
- alloc_bytes + 256 * 1024 * 1024 < num_bytes)
+ if (force == CHUNK_ALLOC_FORCE)
+ return 1;
+
+ /*
+ * in limited mode, we want to have some free space up to
+ * about 1% of the FS size.
+ */
+ if (force == CHUNK_ALLOC_LIMITED) {
+ thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+ thresh = max_t(u64, 64 * 1024 * 1024,
+ div_factor_fine(thresh, 1));
+
+ if (num_bytes - num_allocated < thresh)
+ return 1;
+ }
+
+ /*
+ * we have two similar checks here, one based on a percentage
+ * and one based on a hard number of 256MB. The idea is that if
+ * we have a good amount of free room, don't allocate a chunk.
+ * A good amount means less than 80% of the chunk space we have
+ * allocated is used, or more than 256MB is free.
+ */
+ if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
return 0;
- if (sinfo->bytes_used + sinfo->bytes_reserved +
- alloc_bytes < div_factor(num_bytes, 8))
+ if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
return 0;
thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+
+ /* 256MB or 5% of the FS */
thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
return 0;
-
return 1;
}
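Stripped of the btrfs types, should_alloc_chunk() above reduces to a handful of threshold comparisons: always allocate when forced; in limited mode allocate once free space inside existing chunks drops below roughly 1% of the FS (with a 64MB floor); otherwise skip while there is still more than 256MB free or less than 80% of the chunk space is spoken for, with a final large-FS check that also skips while real usage is under ~30%. A hedged standalone rendering of that arithmetic (all sizes in main() are made-up examples):

#include <stdio.h>

enum { ALLOC_NO_FORCE = 0, ALLOC_FORCE = 1, ALLOC_LIMITED = 2 };

#define MB (1024ULL * 1024ULL)

static unsigned long long div_factor_fine(unsigned long long num, int percent)
{
	return num * percent / 100;
}

static unsigned long long div_factor(unsigned long long num, int tenths)
{
	return num * tenths / 10;
}

static int should_alloc_chunk(unsigned long long total_bytes,    /* chunk space */
			      unsigned long long num_allocated,  /* used + reserved */
			      unsigned long long bytes_used,
			      unsigned long long fs_total,        /* whole FS size */
			      unsigned long long alloc_bytes, int force)
{
	unsigned long long thresh;

	if (force == ALLOC_FORCE)
		return 1;

	/* limited mode: keep ~1% of the FS (at least 64MB) free in chunks */
	if (force == ALLOC_LIMITED) {
		thresh = div_factor_fine(fs_total, 1);
		if (thresh < 64 * MB)
			thresh = 64 * MB;
		if (total_bytes - num_allocated < thresh)
			return 1;
	}

	/* plenty of room: more than 256MB free ... */
	if (num_allocated + alloc_bytes + 256 * MB < total_bytes)
		return 0;
	/* ... or less than 80% of the chunk space spoken for */
	if (num_allocated + alloc_bytes < div_factor(total_bytes, 8))
		return 0;

	/* on a larger FS, skip while real usage is still under ~30% */
	thresh = div_factor_fine(fs_total, 5);
	if (thresh < 256 * MB)
		thresh = 256 * MB;
	if (total_bytes > thresh && bytes_used < div_factor(total_bytes, 3))
		return 0;

	return 1;
}

int main(void)
{
	/* made-up numbers: ~100GB of chunks, ~90GB reserved, ~85GB used, ~1TB FS */
	int r = should_alloc_chunk(100000 * MB, 90000 * MB, 85000 * MB,
				   1000000 * MB, 8 * MB, ALLOC_NO_FORCE);
	printf("allocate another chunk: %d\n", r);   /* 0: >256MB still free */
	return 0;
}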
@@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
{
struct btrfs_space_info *space_info;
struct btrfs_fs_info *fs_info = extent_root->fs_info;
+ int wait_for_alloc = 0;
int ret = 0;
- mutex_lock(&fs_info->chunk_mutex);
-
flags = btrfs_reduce_alloc_profile(extent_root, flags);
space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
}
BUG_ON(!space_info);
+again:
spin_lock(&space_info->lock);
if (space_info->force_alloc)
- force = 1;
+ force = space_info->force_alloc;
if (space_info->full) {
spin_unlock(&space_info->lock);
- goto out;
+ return 0;
}
- if (!force && !should_alloc_chunk(extent_root, space_info,
- alloc_bytes)) {
+ if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
spin_unlock(&space_info->lock);
- goto out;
+ return 0;
+ } else if (space_info->chunk_alloc) {
+ wait_for_alloc = 1;
+ } else {
+ space_info->chunk_alloc = 1;
}
+
spin_unlock(&space_info->lock);
+ mutex_lock(&fs_info->chunk_mutex);
+
+ /*
+ * The chunk_mutex is held throughout the entirety of a chunk
+ * allocation, so once we've acquired the chunk_mutex we know that the
+ * other guy is done and we need to recheck and see if we should
+ * allocate.
+ */
+ if (wait_for_alloc) {
+ mutex_unlock(&fs_info->chunk_mutex);
+ wait_for_alloc = 0;
+ goto again;
+ }
+
/*
* If we have mixed data/metadata chunks we want to make sure we keep
* allocating mixed chunks instead of individual chunks.
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info->full = 1;
else
ret = 1;
- space_info->force_alloc = 0;
+
+ space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+ space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
-out:
mutex_unlock(&extent_root->fs_info->chunk_mutex);
return ret;
}
@@ -5303,11 +5368,13 @@ loop:
if (allowed_chunk_alloc) {
ret = do_chunk_alloc(trans, root, num_bytes +
- 2 * 1024 * 1024, data, 1);
+ 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_LIMITED);
allowed_chunk_alloc = 0;
done_chunk_alloc = 1;
- } else if (!done_chunk_alloc) {
- space_info->force_alloc = 1;
+ } else if (!done_chunk_alloc &&
+ space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
+ space_info->force_alloc = CHUNK_ALLOC_LIMITED;
}
if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5393,7 +5460,8 @@ again:
*/
if (empty_size || root->ref_cows)
ret = do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes + 2 * 1024 * 1024, data, 0);
+ num_bytes + 2 * 1024 * 1024, data,
+ CHUNK_ALLOC_NO_FORCE);
WARN_ON(num_bytes < root->sectorsize);
ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5405,7 +5473,7 @@ again:
num_bytes = num_bytes & ~(root->sectorsize - 1);
num_bytes = max(num_bytes, min_alloc_size);
do_chunk_alloc(trans, root->fs_info->extent_root,
- num_bytes, data, 1);
+ num_bytes, data, CHUNK_ALLOC_FORCE);
goto again;
}
if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -8109,13 +8177,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
alloc_flags = update_block_group_flags(root, cache->flags);
if (alloc_flags != cache->flags)
- do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
ret = set_block_group_ro(cache);
if (!ret)
goto out;
alloc_flags = get_alloc_profile(root, cache->space_info->flags);
- ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
if (ret < 0)
goto out;
ret = set_block_group_ro(cache);
@@ -8128,7 +8198,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type)
{
u64 alloc_flags = get_alloc_profile(root, type);
- return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+ return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+ CHUNK_ALLOC_FORCE);
}
/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 20ddb28602a8..315138605088 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
}
}
+static void uncache_state(struct extent_state **cached_ptr)
+{
+ if (cached_ptr && (*cached_ptr)) {
+ struct extent_state *state = *cached_ptr;
+ *cached_ptr = NULL;
+ free_extent_state(state);
+ }
+}
+
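uncache_state() is the error-path counterpart of cache_state(): the read end_io code further down takes a reference on the locked extent state so the uptodate/unlock helpers can skip a tree search, and this helper drops that reference when the page turns out to be bad. A generic refcounted stash along the same lines (struct demo_state and its helpers are invented):

/* Sketch of the cache/uncache reference pattern; not the btrfs structures. */
#include <stdio.h>
#include <stdlib.h>

struct demo_state {
        int refs;
        long start;
};

static void demo_get(struct demo_state *s) { s->refs++; }

static void demo_put(struct demo_state *s)
{
        if (--s->refs == 0)
                free(s);
}

/* Mirrors cache_state(): stash a referenced pointer for a later caller. */
static void demo_cache(struct demo_state *s, struct demo_state **cached)
{
        if (cached && !*cached) {
                demo_get(s);
                *cached = s;
        }
}

/* Mirrors uncache_state(): drop the stashed reference, e.g. on error. */
static void demo_uncache(struct demo_state **cached)
{
        if (cached && *cached) {
                struct demo_state *s = *cached;
                *cached = NULL;
                demo_put(s);
        }
}

int main(void)
{
        struct demo_state *s = malloc(sizeof(*s));
        struct demo_state *cached = NULL;

        s->refs = 1;
        s->start = 4096;
        demo_cache(s, &cached);     /* refs == 2, as in the readpage end_io */
        demo_uncache(&cached);      /* error path: back to 1 */
        demo_put(s);                /* final reference */
        printf("done\n");
        return 0;
}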
/*
* set some bits on a range in the tree. This may require allocations or
* sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
}
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+ struct extent_state **cached_state, gfp_t mask)
{
- return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
- NULL, mask);
+ return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+ NULL, cached_state, mask);
}
static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
mask);
}
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
do {
struct page *page = bvec->bv_page;
+ struct extent_state *cached = NULL;
+ struct extent_state *state;
+
tree = &BTRFS_I(page->mapping->host)->io_tree;
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (++bvec <= bvec_end)
prefetchw(&bvec->bv_page->flags);
+ spin_lock(&tree->lock);
+ state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+ if (state && state->start == start) {
+ /*
+ * take a reference on the state, unlock will drop
+ * the ref
+ */
+ cache_state(state, &cached);
+ }
+ spin_unlock(&tree->lock);
+
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
- NULL);
+ state);
if (ret)
uptodate = 0;
}
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
test_bit(BIO_UPTODATE, &bio->bi_flags);
if (err)
uptodate = 0;
+ uncache_state(&cached);
continue;
}
}
if (uptodate) {
- set_extent_uptodate(tree, start, end,
+ set_extent_uptodate(tree, start, end, &cached,
GFP_ATOMIC);
}
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
if (whole_page) {
if (uptodate) {
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
do {
struct page *page = bvec->bv_page;
+ struct extent_state *cached = NULL;
tree = &BTRFS_I(page->mapping->host)->io_tree;
start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
prefetchw(&bvec->bv_page->flags);
if (uptodate) {
- set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+ set_extent_uptodate(tree, start, end, &cached,
+ GFP_ATOMIC);
} else {
ClearPageUptodate(page);
SetPageError(page);
}
- unlock_extent(tree, start, end, GFP_ATOMIC);
+ unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
} while (bvec >= bio->bi_io_vec);
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
while (cur <= end) {
if (cur >= last_byte) {
char *userpage;
+ struct extent_state *cached = NULL;
+
iosize = PAGE_CACHE_SIZE - page_offset;
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
break;
}
em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
/* we've found a hole, just zero and go on */
if (block_start == EXTENT_MAP_HOLE) {
char *userpage;
+ struct extent_state *cached = NULL;
+
userpage = kmap_atomic(page, KM_USER0);
memset(userpage + page_offset, 0, iosize);
flush_dcache_page(page);
kunmap_atomic(userpage, KM_USER0);
set_extent_uptodate(tree, cur, cur + iosize - 1,
- GFP_NOFS);
- unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+ &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur, cur + iosize - 1,
+ &cached, GFP_NOFS);
cur = cur + iosize;
page_offset += iosize;
continue;
@@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
iocount++;
block_start = block_start + iosize;
} else {
- set_extent_uptodate(tree, block_start, cur_end,
+ struct extent_state *cached = NULL;
+
+ set_extent_uptodate(tree, block_start, cur_end, &cached,
GFP_NOFS);
- unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+ unlock_extent_cached(tree, block_start, cur_end,
+ &cached, GFP_NOFS);
block_start = cur_end + 1;
}
page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
num_pages = num_extent_pages(eb->start, eb->len);
set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
- GFP_NOFS);
+ NULL, GFP_NOFS);
for (i = 0; i < num_pages; i++) {
page = extent_buffer_page(eb, i);
if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
kunmap_atomic(dst_kaddr, KM_USER0);
}
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+ unsigned long distance = (src > dst) ? src - dst : dst - src;
+ return distance < len;
+}
+
static void copy_pages(struct page *dst_page, struct page *src_page,
unsigned long dst_off, unsigned long src_off,
unsigned long len)
@@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
char *src_kaddr;
- if (dst_page != src_page)
+ if (dst_page != src_page) {
src_kaddr = kmap_atomic(src_page, KM_USER1);
- else
+ } else {
src_kaddr = dst_kaddr;
+ BUG_ON(areas_overlap(src_off, dst_off, len));
+ }
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
"len %lu len %lu\n", dst_offset, len, dst->len);
BUG_ON(1);
}
- if (dst_offset < src_offset) {
+ if (!areas_overlap(src_offset, dst_offset, len)) {
memcpy_extent_buffer(dst, dst_offset, src_offset, len);
return;
}
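areas_overlap() replaces the old dst_offset < src_offset test in memmove_extent_buffer(): a plain forward memcpy is only safe when the two ranges do not intersect at all, whichever side the destination is on. Roughly the same check in a standalone form (demo values only):

/* Sketch of the overlap test used to pick memcpy vs. a backwards move. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool areas_overlap(unsigned long src, unsigned long dst,
                          unsigned long len)
{
        unsigned long distance = (src > dst) ? src - dst : dst - src;
        return distance < len;
}

int main(void)
{
        char buf[32] = "abcdefghijklmno";

        if (!areas_overlap(0, 8, 8))
                memcpy(buf + 8, buf, 8);        /* disjoint: memcpy is fine */
        else
                memmove(buf + 8, buf, 8);       /* overlapping: must move */

        /* Overlapping case: src 0, dst 4, len 8 -> distance 4 < 8. */
        printf("overlap(0, 4, 8) = %d\n", areas_overlap(0, 4, 8));
        printf("%s\n", buf);
        return 0;
}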
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f62c5442835d..af2d7179c372 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bits, int exclusive_bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
- gfp_t mask);
+ struct extent_state **cached_state, gfp_t mask);
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e621ea54a3fd..75899a01dded 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
/*
* unlocks pages after btrfs_file_write is done with them
*/
-static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
+void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
size_t i;
for (i = 0; i < num_pages; i++) {
@@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
-static noinline int dirty_and_release_pages(struct btrfs_root *root,
- struct file *file,
- struct page **pages,
- size_t num_pages,
- loff_t pos,
- size_t write_bytes)
+int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
+ struct page **pages, size_t num_pages,
+ loff_t pos, size_t write_bytes,
+ struct extent_state **cached)
{
int err = 0;
int i;
- struct inode *inode = fdentry(file)->d_inode;
u64 num_bytes;
u64 start_pos;
u64 end_of_last_block;
@@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root,
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- NULL);
+ cached);
if (err)
return err;
@@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
}
if (copied > 0) {
- ret = dirty_and_release_pages(root, file, pages,
- dirty_pages, pos,
- copied);
+ ret = btrfs_dirty_pages(root, inode, pages,
+ dirty_pages, pos, copied,
+ NULL);
if (ret) {
btrfs_delalloc_release_space(inode,
dirty_pages << PAGE_CACHE_SHIFT);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index f561c953205b..11d2e9cea09e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct inode *inode;
struct rb_node *node;
struct list_head *pos, *n;
+ struct page **pages;
struct page *page;
struct extent_state *cached_state = NULL;
struct btrfs_free_cluster *cluster = NULL;
@@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
u64 start, end, len;
u64 bytes = 0;
u32 *crc, *checksums;
- pgoff_t index = 0, last_index = 0;
unsigned long first_page_offset;
- int num_checksums;
+ int index = 0, num_pages = 0;
int entries = 0;
int bitmaps = 0;
int ret = 0;
bool next_page = false;
+ bool out_of_space = false;
root = root->fs_info->tree_root;
@@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root,
return 0;
}
- last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
+ num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+ PAGE_CACHE_SHIFT;
filemap_write_and_wait(inode->i_mapping);
btrfs_wait_ordered_range(inode, inode->i_size &
~(root->sectorsize - 1), (u64)-1);
/* We need a checksum per page. */
- num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE;
- crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
+ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
if (!crc) {
iput(inode);
return 0;
}
+ pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
+ if (!pages) {
+ kfree(crc);
+ iput(inode);
+ return 0;
+ }
+
/* Since the first page has all of our checksums and our generation we
* need to calculate the offset into the page that we can start writing
* our entries.
*/
- first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
+ first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
/* Get the cluster for this block_group if it exists */
if (!list_empty(&block_group->cluster_list))
@@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
* after find_get_page at this point. Just putting this here so people
* know and don't freak out.
*/
- while (index <= last_index) {
+ while (index < num_pages) {
page = grab_cache_page(inode->i_mapping, index);
if (!page) {
- pgoff_t i = 0;
+ int i;
- while (i < index) {
- page = find_get_page(inode->i_mapping, i);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
- i++;
+ for (i = 0; i < num_pages; i++) {
+ unlock_page(pages[i]);
+ page_cache_release(pages[i]);
}
goto out_free;
}
+ pages[index] = page;
index++;
}
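The cache writer now computes the page count with a round-up division and pins every page into a pages[] array up front, so a failure part-way through can release exactly what was grabbed. The round-up idiom on its own, with PAGE_SIZE_DEMO standing in for PAGE_CACHE_SIZE:

/* Sketch of the round-up-to-pages calculation used for num_pages. */
#include <stdio.h>

#define PAGE_SHIFT_DEMO 12
#define PAGE_SIZE_DEMO  (1UL << PAGE_SHIFT_DEMO)

static unsigned long pages_needed(unsigned long long isize)
{
        return (isize + PAGE_SIZE_DEMO - 1) >> PAGE_SHIFT_DEMO;
}

int main(void)
{
        /* One byte still needs a whole page; an exact multiple needs no extra. */
        printf("%lu %lu %lu\n",
               pages_needed(1),                    /* 1 */
               pages_needed(PAGE_SIZE_DEMO),       /* 1 */
               pages_needed(PAGE_SIZE_DEMO + 1));  /* 2 */
        return 0;
}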
@@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
offset = start_offset;
}
- page = find_get_page(inode->i_mapping, index);
+ if (index >= num_pages) {
+ out_of_space = true;
+ break;
+ }
+
+ page = pages[index];
addr = kmap(page);
entry = addr + start_offset;
@@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
bytes += PAGE_CACHE_SIZE;
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
-
- /*
- * We need to release our reference we got for grab_cache_page,
- * except for the first page which will hold our checksums, we
- * do that below.
- */
- if (index != 0) {
- unlock_page(page);
- page_cache_release(page);
- }
-
- page_cache_release(page);
-
index++;
} while (node || next_page);
@@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_free_space *entry =
list_entry(pos, struct btrfs_free_space, list);
- page = find_get_page(inode->i_mapping, index);
+ if (index >= num_pages) {
+ out_of_space = true;
+ break;
+ }
+ page = pages[index];
addr = kmap(page);
memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
@@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root,
crc++;
bytes += PAGE_CACHE_SIZE;
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
list_del_init(&entry->list);
index++;
}
+ if (out_of_space) {
+ btrfs_drop_pages(pages, num_pages);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+ i_size_read(inode) - 1, &cached_state,
+ GFP_NOFS);
+ ret = 0;
+ goto out_free;
+ }
+
/* Zero out the rest of the pages just to make sure */
- while (index <= last_index) {
+ while (index < num_pages) {
void *addr;
- page = find_get_page(inode->i_mapping, index);
-
+ page = pages[index];
addr = kmap(page);
memset(addr, 0, PAGE_CACHE_SIZE);
kunmap(page);
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
bytes += PAGE_CACHE_SIZE;
index++;
}
- btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
-
/* Write the checksums and trans id to the first page */
{
void *addr;
u64 *gen;
- page = find_get_page(inode->i_mapping, 0);
+ page = pages[0];
addr = kmap(page);
- memcpy(addr, checksums, sizeof(u32) * num_checksums);
- gen = addr + (sizeof(u32) * num_checksums);
+ memcpy(addr, checksums, sizeof(u32) * num_pages);
+ gen = addr + (sizeof(u32) * num_pages);
*gen = trans->transid;
kunmap(page);
- ClearPageChecked(page);
- set_page_extent_mapped(page);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- page_cache_release(page);
- page_cache_release(page);
}
- BTRFS_I(inode)->generation = trans->transid;
+ ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
+ bytes, &cached_state);
+ btrfs_drop_pages(pages, num_pages);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+ if (ret) {
+ ret = 0;
+ goto out_free;
+ }
+
+ BTRFS_I(inode)->generation = trans->transid;
+
filemap_write_and_wait(inode->i_mapping);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -853,6 +845,7 @@ out_free:
BTRFS_I(inode)->generation = 0;
}
kfree(checksums);
+ kfree(pages);
btrfs_update_inode(trans, root, inode);
iput(inode);
return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5cc64ab9c485..fcd66b6a8086 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1770,9 +1770,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
add_pending_csums(trans, inode, ordered_extent->file_offset,
&ordered_extent->list);
- btrfs_ordered_update_i_size(inode, 0, ordered_extent);
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+ if (!ret) {
+ ret = btrfs_update_inode(trans, root, inode);
+ BUG_ON(ret);
+ }
+ ret = 0;
out:
if (nolock) {
if (trans)
@@ -2590,6 +2593,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode)
{
+ if (!leaf->map_token)
+ map_private_extent_buffer(leaf, (unsigned long)item,
+ sizeof(struct btrfs_inode_item),
+ &leaf->map_token, &leaf->kaddr,
+ &leaf->map_start, &leaf->map_len,
+ KM_USER1);
+
btrfs_set_inode_uid(leaf, item, inode->i_uid);
btrfs_set_inode_gid(leaf, item, inode->i_gid);
btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2618,6 +2628,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
+
+ if (leaf->map_token) {
+ unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
+ leaf->map_token = NULL;
+ }
}
/*
@@ -4207,10 +4222,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
struct btrfs_key found_key;
struct btrfs_path *path;
int ret;
- u32 nritems;
struct extent_buffer *leaf;
int slot;
- int advance;
unsigned char d_type;
int over = 0;
u32 di_cur;
@@ -4253,27 +4266,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- advance = 0;
while (1) {
leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
slot = path->slots[0];
- if (advance || slot >= nritems) {
- if (slot >= nritems - 1) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- slot = path->slots[0];
- } else {
- slot++;
- path->slots[0]++;
- }
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto err;
+ else if (ret > 0)
+ break;
+ continue;
}
- advance = 1;
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
@@ -4282,7 +4287,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
if (btrfs_key_type(&found_key) != key_type)
break;
if (found_key.offset < filp->f_pos)
- continue;
+ goto next;
filp->f_pos = found_key.offset;
@@ -4335,6 +4340,8 @@ skip:
di_cur += di_len;
di = (struct btrfs_dir_item *)((char *)di + di_len);
}
+next:
+ path->slots[0]++;
}
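The readdir loop above drops the advance/nritems bookkeeping: each pass either moves to the next leaf when the slot runs off the end, or processes the current slot and bumps path->slots[0] at the single next: label. The same single-increment walk over a paged container, sketched with invented leaf/slot names:

/* Sketch of the leaf/slot walk: one increment point, next_leaf on overflow. */
#include <stdio.h>

#define NR_LEAVES 3

static const int nr_items[NR_LEAVES] = { 2, 1, 3 };

static int next_leaf(int *leaf, int *slot)
{
        if (*leaf + 1 >= NR_LEAVES)
                return 1;        /* > 0: no more leaves, caller breaks */
        (*leaf)++;
        *slot = 0;
        return 0;
}

int main(void)
{
        int leaf = 0, slot = 0;

        while (1) {
                if (slot >= nr_items[leaf]) {
                        if (next_leaf(&leaf, &slot))
                                break;
                        continue;
                }
                printf("visit leaf %d slot %d\n", leaf, slot);
                /* the "goto next" cases in the real code land here as well */
                slot++;
        }
        return 0;
}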
/* Reached end of directory/root. Bump pos past the last item. */
@@ -4527,14 +4534,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BUG_ON(!path);
inode = new_inode(root->fs_info->sb);
- if (!inode)
+ if (!inode) {
+ btrfs_free_path(path);
return ERR_PTR(-ENOMEM);
+ }
if (dir) {
trace_btrfs_inode_request(dir);
ret = btrfs_set_inode_index(dir, index);
if (ret) {
+ btrfs_free_path(path);
iput(inode);
return ERR_PTR(ret);
}
@@ -4834,9 +4844,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
if (inode->i_nlink == ~0U)
return -EMLINK;
- btrfs_inc_nlink(inode);
- inode->i_ctime = CURRENT_TIME;
-
err = btrfs_set_inode_index(dir, &index);
if (err)
goto fail;
@@ -4852,6 +4859,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
goto fail;
}
+ btrfs_inc_nlink(inode);
+ inode->i_ctime = CURRENT_TIME;
+
btrfs_set_trans_block_group(trans, dir);
ihold(inode);
@@ -5221,7 +5231,7 @@ again:
btrfs_mark_buffer_dirty(leaf);
}
set_extent_uptodate(io_tree, em->start,
- extent_map_end(em) - 1, GFP_NOFS);
+ extent_map_end(em) - 1, NULL, GFP_NOFS);
goto insert;
} else {
printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -5428,17 +5438,30 @@ out:
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
+ struct extent_map *em,
u64 start, u64 len)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
- struct extent_map *em;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct btrfs_key ins;
u64 alloc_hint;
int ret;
+ bool insert = false;
- btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+ /*
+ * If the extent map we looked up is a hole and covers the exact
+ * range we want, there is no reason to allocate a new one. However,
+ * if it does not match, we need to free this one and drop the cache
+ * for our range.
+ */
+ if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
+ em->len != len) {
+ free_extent_map(em);
+ em = NULL;
+ insert = true;
+ btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
+ }
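The comment above describes reusing the hole extent map the caller already looked up when it covers exactly the requested range, and only freeing it and dropping the cache otherwise. A reduced sketch of that reuse-or-reallocate decision (struct em_demo and get_mapping are invented):

/* Sketch of the reuse-if-exact-match logic; struct em_demo is invented. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct em_demo {
        unsigned long long start;
        unsigned long long len;
        bool hole;
};

static struct em_demo *get_mapping(struct em_demo *em,
                                   unsigned long long start,
                                   unsigned long long len)
{
        bool insert = false;

        /* Only reuse the map if it is a hole covering exactly [start, len). */
        if (!em->hole || em->start != start || em->len != len) {
                free(em);
                em = NULL;
                insert = true;
        }
        if (!em) {
                em = calloc(1, sizeof(*em));
                if (!em)
                        return NULL;
        }
        em->start = start;
        em->len = len;
        em->hole = false;          /* it now points at real space */
        if (insert)
                printf("would add the new mapping to the tree\n");
        return em;
}

int main(void)
{
        struct em_demo *em = calloc(1, sizeof(*em));

        em->start = 0;
        em->len = 4096;
        em->hole = true;
        em = get_mapping(em, 0, 4096);    /* exact hole: reused, no insert */
        free(em);
        return 0;
}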
trans = btrfs_join_transaction(root, 0);
if (IS_ERR(trans))
@@ -5454,10 +5477,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
goto out;
}
- em = alloc_extent_map(GFP_NOFS);
if (!em) {
- em = ERR_PTR(-ENOMEM);
- goto out;
+ em = alloc_extent_map(GFP_NOFS);
+ if (!em) {
+ em = ERR_PTR(-ENOMEM);
+ goto out;
+ }
}
em->start = start;
@@ -5467,9 +5492,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
em->block_start = ins.objectid;
em->block_len = ins.offset;
em->bdev = root->fs_info->fs_devices->latest_bdev;
+
+ /*
+ * We need to do this because if we're using the original em we searched
+ * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
+ */
+ em->flags = 0;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
- while (1) {
+ while (insert) {
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
@@ -5687,8 +5718,7 @@ must_cow:
* it above
*/
len = bh_result->b_size;
- free_extent_map(em);
- em = btrfs_new_extent_direct(inode, start, len);
+ em = btrfs_new_extent_direct(inode, em, start, len);
if (IS_ERR(em))
return PTR_ERR(em);
len = min(len, em->len - (start - em->start));
@@ -5851,8 +5881,10 @@ again:
}
add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
- btrfs_ordered_update_i_size(inode, 0, ordered);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_ordered_update_i_size(inode, 0, ordered);
+ if (!ret)
+ btrfs_update_inode(trans, root, inode);
+ ret = 0;
out_unlock:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
ordered->file_offset + ordered->len - 1,
@@ -5938,7 +5970,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
- u32 *csums)
+ u32 *csums, int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5949,13 +5981,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
if (ret)
goto err;
- if (write && !skip_sum) {
+ if (skip_sum)
+ goto map;
+
+ if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
inode, rw, bio, 0, 0,
file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
+ } else if (write) {
+ /*
+ * If we aren't doing async submit, calculate the csum of the
+ * bio now.
+ */
+ ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
+ if (ret)
+ goto err;
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
file_offset, csums);
@@ -5963,7 +6006,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
goto err;
}
- ret = btrfs_map_bio(root, rw, bio, 0, 1);
+map:
+ ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
@@ -5985,15 +6029,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int nr_pages = 0;
u32 *csums = dip->csums;
int ret = 0;
+ int async_submit = 0;
int write = rw & REQ_WRITE;
- bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
- if (!bio)
- return -ENOMEM;
- bio->bi_private = dip;
- bio->bi_end_io = btrfs_end_dio_bio;
- atomic_inc(&dip->pending_bios);
-
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
&map_length, NULL, 0);
@@ -6002,6 +6040,19 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
return -EIO;
}
+ if (map_length >= orig_bio->bi_size) {
+ bio = orig_bio;
+ goto submit;
+ }
+
+ async_submit = 1;
+ bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+ if (!bio)
+ return -ENOMEM;
+ bio->bi_private = dip;
+ bio->bi_end_io = btrfs_end_dio_bio;
+ atomic_inc(&dip->pending_bios);
+
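When one device mapping already covers the whole original bio, it is now submitted directly and synchronously instead of being cloned and pushed through the async-checksum path; splitting only happens when the mapping is shorter than the bio. A condensed sketch of that decision (the sizes and helpers are invented, and the per-segment remapping is omitted):

/* Sketch of the submit-whole-bio-or-split decision; sizes are invented. */
#include <stdio.h>

static void submit(unsigned long long len, int async)
{
        printf("submit %llu bytes, async=%d\n", len, async);
}

static void submit_hook(unsigned long long bio_size,
                        unsigned long long map_length)
{
        int async_submit = 0;

        if (map_length >= bio_size) {
                /* One device stripe covers everything: no clone, no async. */
                submit(bio_size, async_submit);
                return;
        }

        async_submit = 1;
        while (bio_size) {
                unsigned long long chunk =
                        bio_size < map_length ? bio_size : map_length;

                submit(chunk, async_submit);
                bio_size -= chunk;
        }
}

int main(void)
{
        submit_hook(65536, 131072);   /* fits: one direct submission */
        submit_hook(65536, 16384);    /* does not fit: split into pieces */
        return 0;
}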
while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
if (unlikely(map_length < submit_len + bvec->bv_len ||
bio_add_page(bio, bvec->bv_page, bvec->bv_len,
@@ -6015,7 +6066,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
- csums);
+ csums, async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
@@ -6052,8 +6103,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
}
}
+submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
- csums);
+ csums, async_submit);
if (!ret)
return 0;
@@ -6148,6 +6200,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
unsigned long nr_segs)
{
int seg;
+ int i;
size_t size;
unsigned long addr;
unsigned blocksize_mask = root->sectorsize - 1;
@@ -6162,8 +6215,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
addr = (unsigned long)iov[seg].iov_base;
size = iov[seg].iov_len;
end += size;
- if ((addr & blocksize_mask) || (size & blocksize_mask))
+ if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
+
+ /* If this is a write we don't need to check anymore */
+ if (rw & WRITE)
+ continue;
+
+ /*
+ * Check to make sure we don't have duplicate iov_base's in this
+ * iovec, if so return EINVAL, otherwise we'll get csum errors
+ * when reading back.
+ */
+ for (i = seg + 1; i < nr_segs; i++) {
+ if (iov[seg].iov_base == iov[i].iov_base)
+ goto out;
+ }
}
retval = 0;
out:
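For direct-I/O reads the iovec is now rejected when two segments share the same iov_base, since filling the same user buffer twice can produce checksum errors on readback. The duplicate scan in standalone form:

/* Sketch of the duplicate-iov_base scan done for direct-I/O reads. */
#include <stdbool.h>
#include <stdio.h>
#include <sys/uio.h>

static bool iov_has_duplicates(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg, i;

        for (seg = 0; seg < nr_segs; seg++)
                for (i = seg + 1; i < nr_segs; i++)
                        if (iov[seg].iov_base == iov[i].iov_base)
                                return true;
        return false;
}

int main(void)
{
        char buf[2][512];
        struct iovec ok[2]  = { { buf[0], 512 }, { buf[1], 512 } };
        struct iovec bad[2] = { { buf[0], 512 }, { buf[0], 512 } };

        printf("ok: %d, bad: %d\n",
               iov_has_duplicates(ok, 2), iov_has_duplicates(bad, 2));
        return 0;
}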
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index cfc264fefdb0..ffb48d6c5433 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
struct btrfs_ioctl_space_info *dest_orig;
- struct btrfs_ioctl_space_info *user_dest;
+ struct btrfs_ioctl_space_info __user *user_dest;
struct btrfs_space_info *info;
u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
BTRFS_BLOCK_GROUP_SYSTEM,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 58e7de9cc90c..0ac712efcdf2 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -159,7 +159,7 @@ enum {
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
- Opt_enospc_debug, Opt_err,
+ Opt_enospc_debug, Opt_subvolrootid, Opt_err,
};
static match_table_t tokens = {
@@ -189,6 +189,7 @@ static match_table_t tokens = {
{Opt_clear_cache, "clear_cache"},
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
{Opt_enospc_debug, "enospc_debug"},
+ {Opt_subvolrootid, "subvolrootid=%d"},
{Opt_err, NULL},
};
@@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
break;
case Opt_subvol:
case Opt_subvolid:
+ case Opt_subvolrootid:
case Opt_device:
/*
* These are parsed by btrfs_parse_early_options
@@ -388,7 +390,7 @@ out:
*/
static int btrfs_parse_early_options(const char *options, fmode_t flags,
void *holder, char **subvol_name, u64 *subvol_objectid,
- struct btrfs_fs_devices **fs_devices)
+ u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *opts, *orig, *p;
@@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
*subvol_objectid = intarg;
}
break;
+ case Opt_subvolrootid:
+ intarg = 0;
+ error = match_int(&args[0], &intarg);
+ if (!error) {
+ /* we want the original fs_tree */
+ if (!intarg)
+ *subvol_rootid =
+ BTRFS_FS_TREE_OBJECTID;
+ else
+ *subvol_rootid = intarg;
+ }
+ break;
case Opt_device:
error = btrfs_scan_one_device(match_strdup(&args[0]),
flags, holder, fs_devices);
@@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
fmode_t mode = FMODE_READ;
char *subvol_name = NULL;
u64 subvol_objectid = 0;
+ u64 subvol_rootid = 0;
int error = 0;
if (!(flags & MS_RDONLY))
@@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
error = btrfs_parse_early_options(data, mode, fs_type,
&subvol_name, &subvol_objectid,
- &fs_devices);
+ &subvol_rootid, &fs_devices);
if (error)
return ERR_PTR(error);
@@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
s->s_flags |= MS_ACTIVE;
}
- root = get_default_root(s, subvol_objectid);
- if (IS_ERR(root)) {
- error = PTR_ERR(root);
- deactivate_locked_super(s);
- goto error_free_subvol_name;
- }
/* if they gave us a subvolume name bind mount into that */
if (strcmp(subvol_name, ".")) {
struct dentry *new_root;
+
+ root = get_default_root(s, subvol_rootid);
+ if (IS_ERR(root)) {
+ error = PTR_ERR(root);
+ deactivate_locked_super(s);
+ goto error_free_subvol_name;
+ }
+
mutex_lock(&root->d_inode->i_mutex);
new_root = lookup_one_len(subvol_name, root,
strlen(subvol_name));
@@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
}
dput(root);
root = new_root;
+ } else {
+ root = get_default_root(s, subvol_objectid);
+ if (IS_ERR(root)) {
+ error = PTR_ERR(root);
+ deactivate_locked_super(s);
+ goto error_free_subvol_name;
+ }
}
kfree(subvol_name);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b158da7e0bb..c571734d5e5a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -32,10 +32,8 @@
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
- WARN_ON(transaction->use_count == 0);
- transaction->use_count--;
- if (transaction->use_count == 0) {
- list_del_init(&transaction->list);
+ WARN_ON(atomic_read(&transaction->use_count) == 0);
+ if (atomic_dec_and_test(&transaction->use_count)) {
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
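put_transaction() now relies on atomic_dec_and_test(), so the final reference holder frees the transaction without needing trans_mutex (the list removal moves to the commit path further down). The same put pattern with C11 atomics (struct demo_trans is invented):

/* Sketch of the atomic_dec_and_test put pattern using C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_trans {
        atomic_int use_count;
        int id;
};

static void demo_put(struct demo_trans *t)
{
        /* fetch_sub returns the old value; 1 means we dropped the last ref */
        if (atomic_fetch_sub(&t->use_count, 1) == 1) {
                printf("freeing transaction %d\n", t->id);
                free(t);
        }
}

int main(void)
{
        struct demo_trans *t = malloc(sizeof(*t));

        atomic_init(&t->use_count, 1);
        t->id = 42;
        atomic_fetch_add(&t->use_count, 1);   /* a second user takes a ref */
        demo_put(t);                          /* first put: still alive */
        demo_put(t);                          /* last put: freed here */
        return 0;
}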
@@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root)
if (!cur_trans)
return -ENOMEM;
root->fs_info->generation++;
- cur_trans->num_writers = 1;
+ atomic_set(&cur_trans->num_writers, 1);
cur_trans->num_joined = 0;
cur_trans->transid = root->fs_info->generation;
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
cur_trans->in_commit = 0;
cur_trans->blocked = 0;
- cur_trans->use_count = 1;
+ atomic_set(&cur_trans->use_count, 1);
cur_trans->commit_done = 0;
cur_trans->start_time = get_seconds();
@@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root)
root->fs_info->running_transaction = cur_trans;
spin_unlock(&root->fs_info->new_trans_lock);
} else {
- cur_trans->num_writers++;
+ atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
}
@@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root)
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
DEFINE_WAIT(wait);
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
while (1) {
prepare_to_wait(&root->fs_info->transaction_wait, &wait,
TASK_UNINTERRUPTIBLE);
@@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
{
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
+ int retries = 0;
int ret;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -204,7 +203,7 @@ again:
}
cur_trans = root->fs_info->running_transaction;
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
if (type != TRANS_JOIN_NOLOCK)
mutex_unlock(&root->fs_info->trans_mutex);
@@ -224,10 +223,18 @@ again:
if (num_items > 0) {
ret = btrfs_trans_reserve_metadata(h, root, num_items);
- if (ret == -EAGAIN) {
+ if (ret == -EAGAIN && !retries) {
+ retries++;
btrfs_commit_transaction(h, root);
goto again;
+ } else if (ret == -EAGAIN) {
+ /*
+ * We have already retried and got EAGAIN, so really we
+ * don't have space, so set ret to -ENOSPC.
+ */
+ ret = -ENOSPC;
}
+
if (ret < 0) {
btrfs_end_transaction(h, root);
return ERR_PTR(ret);
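start_transaction() retries the metadata reservation once after forcing a commit, and a second -EAGAIN is converted to -ENOSPC so callers see a definitive failure instead of looping. A condensed sketch of that retry-once flow (reserve_metadata is a stand-in for the real reservation call):

/* Sketch of the retry-once-then-ENOSPC flow around a reservation. */
#include <errno.h>
#include <stdio.h>

static int attempts_left = 1;     /* pretend one commit frees enough space */

static int reserve_metadata(void)
{
        if (attempts_left-- > 0)
                return -EAGAIN;   /* not enough space yet */
        return 0;
}

static int start_transaction_demo(void)
{
        int retries = 0;
        int ret;

again:
        ret = reserve_metadata();
        if (ret == -EAGAIN && !retries) {
                retries++;
                printf("committing to flush space, then retrying\n");
                goto again;
        } else if (ret == -EAGAIN) {
                /* Already retried once: report a hard out-of-space error. */
                ret = -ENOSPC;
        }
        return ret;
}

int main(void)
{
        printf("result: %d\n", start_transaction_demo());
        return 0;
}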
@@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
goto out_unlock; /* nothing committing|committed */
}
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
wait_for_commit(root, cur_trans);
@@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
wake_up_process(info->transaction_kthread);
}
- if (lock)
- mutex_lock(&info->trans_mutex);
WARN_ON(cur_trans != info->running_transaction);
- WARN_ON(cur_trans->num_writers < 1);
- cur_trans->num_writers--;
+ WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
+ atomic_dec(&cur_trans->num_writers);
smp_mb();
if (waitqueue_active(&cur_trans->writer_wait))
wake_up(&cur_trans->writer_wait);
put_transaction(cur_trans);
- if (lock)
- mutex_unlock(&info->trans_mutex);
if (current->journal_info == trans)
current->journal_info = NULL;
@@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
/* take transaction reference */
mutex_lock(&root->fs_info->trans_mutex);
cur_trans = trans->transaction;
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
@@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_lock(&root->fs_info->trans_mutex);
if (cur_trans->in_commit) {
- cur_trans->use_count++;
+ atomic_inc(&cur_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
btrfs_end_transaction(trans, root);
@@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (!prev_trans->commit_done) {
- prev_trans->use_count++;
+ atomic_inc(&prev_trans->use_count);
mutex_unlock(&root->fs_info->trans_mutex);
wait_for_commit(root, prev_trans);
@@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
TASK_UNINTERRUPTIBLE);
smp_mb();
- if (cur_trans->num_writers > 1)
+ if (atomic_read(&cur_trans->num_writers) > 1)
schedule_timeout(MAX_SCHEDULE_TIMEOUT);
else if (should_grow)
schedule_timeout(1);
mutex_lock(&root->fs_info->trans_mutex);
finish_wait(&cur_trans->writer_wait, &wait);
- } while (cur_trans->num_writers > 1 ||
+ } while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
ret = create_pending_snapshots(trans, root->fs_info);
@@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
wake_up(&cur_trans->commit_wait);
+ list_del_init(&cur_trans->list);
put_transaction(cur_trans);
put_transaction(cur_trans);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 229a594cacd5..e441acc6c584 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -27,11 +27,11 @@ struct btrfs_transaction {
* total writers in this transaction, it must be zero before the
* transaction can end
*/
- unsigned long num_writers;
+ atomic_t num_writers;
unsigned long num_joined;
int in_commit;
- int use_count;
+ atomic_t use_count;
int commit_done;
int blocked;
struct list_head list;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a5303b871b13..cfd660550ded 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
struct btrfs_path *path;
struct extent_buffer *leaf;
struct btrfs_dir_item *di;
- int ret = 0, slot, advance;
+ int ret = 0, slot;
size_t total_size = 0, size_left = size;
unsigned long name_ptr;
size_t name_len;
- u32 nritems;
/*
* ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- advance = 0;
+
while (1) {
leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
slot = path->slots[0];
/* this is where we start walking through the path */
- if (advance || slot >= nritems) {
+ if (slot >= btrfs_header_nritems(leaf)) {
/*
* if we've reached the last slot in this leaf we need
* to go to the next leaf and reset everything
*/
- if (slot >= nritems-1) {
- ret = btrfs_next_leaf(root, path);
- if (ret)
- break;
- leaf = path->nodes[0];
- nritems = btrfs_header_nritems(leaf);
- slot = path->slots[0];
- } else {
- /*
- * just walking through the slots on this leaf
- */
- slot++;
- path->slots[0]++;
- }
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto err;
+ else if (ret > 0)
+ break;
+ continue;
}
- advance = 1;
btrfs_item_key_to_cpu(leaf, &found_key, slot);
@@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
/* we are just looking for how big our buffer needs to be */
if (!size)
- continue;
+ goto next;
if (!buffer || (name_len + 1) > size_left) {
ret = -ERANGE;
@@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
size_left -= name_len + 1;
buffer += name_len + 1;
+next:
+ path->slots[0]++;
}
ret = total_size;
diff --git a/fs/cifs/README b/fs/cifs/README
index fe1683590828..74ab165fc646 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -685,22 +685,6 @@ LinuxExtensionsEnabled If set to one then the client will attempt to
support and want to map the uid and gid fields
to values supplied at mount (rather than the
actual values, then set this to zero. (default 1)
-Experimental When set to 1 used to enable certain experimental
- features (currently enables multipage writes
- when signing is enabled, the multipage write
- performance enhancement was disabled when
- signing turned on in case buffer was modified
- just before it was sent, also this flag will
- be used to use the new experimental directory change
- notification code). When set to 2 enables
- an additional experimental feature, "raw ntlmssp"
- session establishment support (which allows
- specifying "sec=ntlmssp" on mount). The Linux cifs
- module will use ntlmv2 authentication encapsulated
- in "raw ntlmssp" (not using SPNEGO) when
- "sec=ntlmssp" is specified on mount.
- This support also requires building cifs with
- the CONFIG_CIFS_EXPERIMENTAL configuration flag.
These experimental features and tracing can be enabled by changing flags in
/proc/fs/cifs (after the cifs module has been installed or built into the
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index e654dfd092c3..53d57a3fe427 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -50,7 +50,7 @@ void cifs_fscache_unregister(void)
*/
struct cifs_server_key {
uint16_t family; /* address family */
- uint16_t port; /* IP port */
+ __be16 port; /* IP port */
union {
struct in_addr ipv4_addr;
struct in6_addr ipv6_addr;
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 65829d32128c..30d01bc90855 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -423,7 +423,6 @@ static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
static const struct file_operations cifs_multiuser_mount_proc_fops;
static const struct file_operations cifs_security_flags_proc_fops;
-static const struct file_operations cifs_experimental_proc_fops;
static const struct file_operations cifs_linux_ext_proc_fops;
void
@@ -441,8 +440,6 @@ cifs_proc_init(void)
proc_create("cifsFYI", 0, proc_fs_cifs, &cifsFYI_proc_fops);
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("OplockEnabled", 0, proc_fs_cifs, &cifs_oplock_proc_fops);
- proc_create("Experimental", 0, proc_fs_cifs,
- &cifs_experimental_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
proc_create("MultiuserMount", 0, proc_fs_cifs,
@@ -469,7 +466,6 @@ cifs_proc_clean(void)
remove_proc_entry("OplockEnabled", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
- remove_proc_entry("Experimental", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
remove_proc_entry("fs/cifs", NULL);
}
@@ -550,45 +546,6 @@ static const struct file_operations cifs_oplock_proc_fops = {
.write = cifs_oplock_proc_write,
};
-static int cifs_experimental_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", experimEnabled);
- return 0;
-}
-
-static int cifs_experimental_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cifs_experimental_proc_show, NULL);
-}
-
-static ssize_t cifs_experimental_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- experimEnabled = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- experimEnabled = 1;
- else if (c == '2')
- experimEnabled = 2;
-
- return count;
-}
-
-static const struct file_operations cifs_experimental_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_experimental_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_experimental_proc_write,
-};
-
static int cifs_linux_ext_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", linuxExtEnabled);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 4dfba8283165..33d221394aca 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -113,7 +113,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
MAX_MECH_STR_LEN +
UID_KEY_LEN + (sizeof(uid_t) * 2) +
CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
- USER_KEY_LEN + strlen(sesInfo->userName) +
+ USER_KEY_LEN + strlen(sesInfo->user_name) +
PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
spnego_key = ERR_PTR(-ENOMEM);
@@ -153,7 +153,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
dp = description + strlen(description);
- sprintf(dp, ";user=%s", sesInfo->userName);
+ sprintf(dp, ";user=%s", sesInfo->user_name);
dp = description + strlen(description);
sprintf(dp, ";pid=0x%x", current->pid);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index fc0fd4fde306..23d43cde4306 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -90,7 +90,7 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
case UNI_COLON:
*target = ':';
break;
- case UNI_ASTERIK:
+ case UNI_ASTERISK:
*target = '*';
break;
case UNI_QUESTION:
@@ -264,40 +264,40 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
* names are little endian 16 bit Unicode on the wire
*/
int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+cifsConvertToUCS(__le16 *target, const char *source, int srclen,
const struct nls_table *cp, int mapChars)
{
int i, j, charlen;
- int len_remaining = maxlen;
char src_char;
- __u16 temp;
+ __le16 dst_char;
+ wchar_t tmp;
if (!mapChars)
return cifs_strtoUCS(target, source, PATH_MAX, cp);
- for (i = 0, j = 0; i < maxlen; j++) {
+ for (i = 0, j = 0; i < srclen; j++) {
src_char = source[i];
switch (src_char) {
case 0:
- put_unaligned_le16(0, &target[j]);
+ put_unaligned(0, &target[j]);
goto ctoUCS_out;
case ':':
- temp = UNI_COLON;
+ dst_char = cpu_to_le16(UNI_COLON);
break;
case '*':
- temp = UNI_ASTERIK;
+ dst_char = cpu_to_le16(UNI_ASTERISK);
break;
case '?':
- temp = UNI_QUESTION;
+ dst_char = cpu_to_le16(UNI_QUESTION);
break;
case '<':
- temp = UNI_LESSTHAN;
+ dst_char = cpu_to_le16(UNI_LESSTHAN);
break;
case '>':
- temp = UNI_GRTRTHAN;
+ dst_char = cpu_to_le16(UNI_GRTRTHAN);
break;
case '|':
- temp = UNI_PIPE;
+ dst_char = cpu_to_le16(UNI_PIPE);
break;
/*
* FIXME: We can not handle remapping backslash (UNI_SLASH)
@@ -305,17 +305,17 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
* as they use backslash as separator.
*/
default:
- charlen = cp->char2uni(source+i, len_remaining,
- &temp);
+ charlen = cp->char2uni(source + i, srclen - i, &tmp);
+ dst_char = cpu_to_le16(tmp);
+
/*
* if no match, use question mark, which at least in
* some cases serves as wild card
*/
if (charlen < 1) {
- temp = 0x003f;
+ dst_char = cpu_to_le16(0x003f);
charlen = 1;
}
- len_remaining -= charlen;
/*
* character may take more than one byte in the source
* string, but will take exactly two bytes in the
@@ -324,9 +324,8 @@ cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
i += charlen;
continue;
}
- put_unaligned_le16(temp, &target[j]);
+ put_unaligned(dst_char, &target[j]);
i++; /* move to next char in source string */
- len_remaining--;
}
ctoUCS_out:
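cifsConvertToUCS() now produces little-endian values as it converts and uses the corrected UNI_ASTERISK name; the reserved path characters are remapped into the 0xF000 private range exactly as the UNI_* constants define them. A small sketch of that remapping (remap_char is invented and the endianness handling is omitted):

/* Sketch of remapping reserved path characters into the 0xF000 range. */
#include <stdint.h>
#include <stdio.h>

static uint16_t remap_char(char c)
{
        switch (c) {
        case ':': case '*': case '?': case '<': case '>': case '|':
                /* same trick as UNI_ASTERISK etc.: offset into 0xF000 */
                return (uint16_t)(0xF000 + (unsigned char)c);
        default:
                return (unsigned char)c;    /* ASCII maps straight through */
        }
}

int main(void)
{
        const char *name = "a*b";
        for (const char *p = name; *p; p++)
                printf("%c -> 0x%04x\n", *p, remap_char(*p));
        return 0;
}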
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 7fe6b52df507..644dd882a560 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -44,7 +44,7 @@
* reserved symbols (along with \ and /), otherwise illegal to store
* in filenames in NTFS
*/
-#define UNI_ASTERIK (__u16) ('*' + 0xF000)
+#define UNI_ASTERISK (__u16) ('*' + 0xF000)
#define UNI_QUESTION (__u16) ('?' + 0xF000)
#define UNI_COLON (__u16) (':' + 0xF000)
#define UNI_GRTRTHAN (__u16) ('>' + 0xF000)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index a51585f9852b..d1a016be73ba 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -30,12 +30,13 @@
#include <linux/ctype.h>
#include <linux/random.h>
-/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
-/* the 16 byte signature must be allocated by the caller */
-/* Note we only use the 1st eight bytes */
-/* Note that the smb header signature field on input contains the
- sequence number before this function is called */
-
+/*
+ * Calculate and return the CIFS signature based on the mac key and SMB PDU.
+ * The 16 byte signature must be allocated by the caller. Note we only use the
+ * 1st eight bytes and that the smb header signature field on input contains
+ * the sequence number before this function is called. Also, this function
+ * should be called with the server->srv_mutex held.
+ */
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
struct TCP_Server_Info *server, char *signature)
{
@@ -209,8 +210,10 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
+ mutex_lock(&server->srv_mutex);
rc = cifs_calculate_signature(cifs_pdu, server,
what_we_think_sig_should_be);
+ mutex_unlock(&server->srv_mutex);
if (rc)
return rc;
@@ -469,15 +472,15 @@ static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
return rc;
}
- /* convert ses->userName to unicode and uppercase */
- len = strlen(ses->userName);
+ /* convert ses->user_name to unicode and uppercase */
+ len = strlen(ses->user_name);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
rc = -ENOMEM;
goto calc_exit_2;
}
- len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
+ len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
UniStrupr(user);
crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f2970136d17d..5c412b33cd7c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -53,7 +53,6 @@ int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
-unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
@@ -127,6 +126,7 @@ cifs_read_super(struct super_block *sb, void *data,
kfree(cifs_sb);
return rc;
}
+ cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
#ifdef CONFIG_CIFS_DFS_UPCALL
/* copy mount params to sb for use in submounts */
@@ -409,8 +409,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_printf(s, ",multiuser");
- else if (tcon->ses->userName)
- seq_printf(s, ",username=%s", tcon->ses->userName);
+ else if (tcon->ses->user_name)
+ seq_printf(s, ",username=%s", tcon->ses->user_name);
if (tcon->ses->domainName)
seq_printf(s, ",domain=%s", tcon->ses->domainName);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 17afb0fbcaed..a5d1106fcbde 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -37,10 +37,9 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
-#define MAX_SHARE_SIZE 64 /* used to be 20, this should still be enough */
-#define MAX_USERNAME_SIZE 32 /* 32 is to allow for 15 char names + null
- termination then *2 for unicode versions */
-#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
+#define MAX_SHARE_SIZE 80
+#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
+#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
#define CIFS_MIN_RCV_POOL 4
@@ -92,7 +91,8 @@ enum statusEnum {
CifsNew = 0,
CifsGood,
CifsExiting,
- CifsNeedReconnect
+ CifsNeedReconnect,
+ CifsNeedNegotiate
};
enum securityEnum {
@@ -274,7 +274,7 @@ struct cifsSesInfo {
int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
- char userName[MAX_USERNAME_SIZE + 1];
+ char *user_name;
char *domainName;
char *password;
struct session_key auth_key;
@@ -817,7 +817,6 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
have the uid/password or Kerberos credential
or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled;
-GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2644a5d6cc67..df959bae6728 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -142,9 +142,9 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
*/
while (server->tcpStatus == CifsNeedReconnect) {
wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus == CifsGood), 10 * HZ);
+ (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
- /* is TCP session is reestablished now ?*/
+ /* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
@@ -729,7 +729,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
return rc;
/* set up echo request */
- smb->hdr.Tid = cpu_to_le16(0xffff);
+ smb->hdr.Tid = 0xffff;
smb->hdr.WordCount = 1;
put_unaligned_le16(1, &smb->EchoCount);
put_bcc_le(1, &smb->hdr);
@@ -1884,10 +1884,10 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
__constant_cpu_to_le16(CIFS_WRLCK))
pLockData->fl_type = F_WRLCK;
- pLockData->fl_start = parm_data->start;
- pLockData->fl_end = parm_data->start +
- parm_data->length - 1;
- pLockData->fl_pid = parm_data->pid;
+ pLockData->fl_start = le64_to_cpu(parm_data->start);
+ pLockData->fl_end = pLockData->fl_start +
+ le64_to_cpu(parm_data->length) - 1;
+ pLockData->fl_pid = le32_to_cpu(parm_data->pid);
}
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6e2b2addfc78..db9d55b507d0 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -199,8 +199,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
spin_unlock(&GlobalMid_Lock);
- while ((server->tcpStatus != CifsExiting) &&
- (server->tcpStatus != CifsGood)) {
+ while (server->tcpStatus == CifsNeedReconnect) {
try_to_freeze();
/* we should try only the port we connected to before */
@@ -212,7 +211,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
atomic_inc(&tcpSesReconnectCount);
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsGood;
+ server->tcpStatus = CifsNeedNegotiate;
spin_unlock(&GlobalMid_Lock);
}
}
@@ -248,24 +247,24 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
- remaining = total_data_size - data_in_this_rsp;
-
- if (remaining == 0)
+ if (total_data_size == data_in_this_rsp)
return 0;
- else if (remaining < 0) {
+ else if (total_data_size < data_in_this_rsp) {
cFYI(1, "total data %d smaller than data in frame %d",
total_data_size, data_in_this_rsp);
return -EINVAL;
- } else {
- cFYI(1, "missing %d bytes from transact2, check next response",
- remaining);
- if (total_data_size > maxBufSize) {
- cERROR(1, "TotalDataSize %d is over maximum buffer %d",
- total_data_size, maxBufSize);
- return -EINVAL;
- }
- return remaining;
}
+
+ remaining = total_data_size - data_in_this_rsp;
+
+ cFYI(1, "missing %d bytes from transact2, check next response",
+ remaining);
+ if (total_data_size > maxBufSize) {
+ cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+ total_data_size, maxBufSize);
+ return -EINVAL;
+ }
+ return remaining;
}
static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
@@ -421,7 +420,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
pdu_length = 4; /* enough to get RFC1001 header */
incomplete_rcv:
- if (echo_retries > 0 &&
+ if (echo_retries > 0 && server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp +
(echo_retries * SMB_ECHO_INTERVAL))) {
cERROR(1, "Server %s has not responded in %d seconds. "
@@ -881,7 +880,8 @@ cifs_parse_mount_options(char *options, const char *devname,
/* null user, ie anonymous, authentication */
vol->nullauth = 1;
}
- if (strnlen(value, 200) < 200) {
+ if (strnlen(value, MAX_USERNAME_SIZE) <
+ MAX_USERNAME_SIZE) {
vol->username = value;
} else {
printk(KERN_WARNING "CIFS: username too long\n");
@@ -1472,7 +1472,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
static bool
match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
{
- unsigned short int port, *sport;
+ __be16 port, *sport;
switch (addr->sa_family) {
case AF_INET:
@@ -1765,6 +1765,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
module_put(THIS_MODULE);
goto out_err_crypto_release;
}
+ tcp_ses->tcpStatus = CifsNeedNegotiate;
/* thread spawned, put it on the list */
spin_lock(&cifs_tcp_ses_lock);
@@ -1808,7 +1809,9 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
break;
default:
/* anything else takes username/password */
- if (strncmp(ses->userName, vol->username,
+ if (ses->user_name == NULL)
+ continue;
+ if (strncmp(ses->user_name, vol->username,
MAX_USERNAME_SIZE))
continue;
if (strlen(vol->username) != 0 &&
@@ -1851,6 +1854,8 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
cifs_put_tcp_session(server);
}
+static bool warned_on_ntlm; /* globals init to false automatically */
+
static struct cifsSesInfo *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
@@ -1906,9 +1911,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
else
sprintf(ses->serverName, "%pI4", &addr->sin_addr);
- if (volume_info->username)
- strncpy(ses->userName, volume_info->username,
- MAX_USERNAME_SIZE);
+ if (volume_info->username) {
+ ses->user_name = kstrdup(volume_info->username, GFP_KERNEL);
+ if (!ses->user_name)
+ goto get_ses_fail;
+ }
/* volume_info->password freed at unmount */
if (volume_info->password) {
@@ -1923,6 +1930,15 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
}
ses->cred_uid = volume_info->cred_uid;
ses->linux_uid = volume_info->linux_uid;
+
+ /* ntlmv2 is much stronger than ntlm security and has been broadly
+ supported for many years; time to update the default security mechanism */
+ if ((volume_info->secFlg == 0) && warned_on_ntlm == false) {
+ warned_on_ntlm = true;
+ cERROR(1, "default security mechanism requested. The default "
+ "security mechanism will be upgraded from ntlm to "
+ "ntlmv2 in kernel release 2.6.41");
+ }
ses->overrideSecFlg = volume_info->secFlg;
mutex_lock(&ses->session_mutex);
@@ -2276,7 +2292,7 @@ static int
generic_ip_connect(struct TCP_Server_Info *server)
{
int rc = 0;
- unsigned short int sport;
+ __be16 sport;
int slen, sfamily;
struct socket *socket = server->ssocket;
struct sockaddr *saddr;
@@ -2361,7 +2377,7 @@ generic_ip_connect(struct TCP_Server_Info *server)
static int
ip_connect(struct TCP_Server_Info *server)
{
- unsigned short int *sport;
+ __be16 *sport;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
@@ -2826,7 +2842,7 @@ try_mount_again:
remote_path_check:
/* check if a whole path (including prepath) is not remote */
- if (!rc && cifs_sb->prepathlen && tcon) {
+ if (!rc && tcon) {
/* build_path_to_root works only when we have a valid tcon */
full_path = cifs_build_path_to_root(cifs_sb, tcon);
if (full_path == NULL) {
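The connect.c hunks above replace the fixed-size userName buffer with a kstrdup()'d ses->user_name that may legitimately be NULL for anonymous (null-user) sessions, which is why cifs_find_smb_ses now skips such sessions before calling strncmp(). A minimal userspace sketch of that pattern; struct session, session_matches_user and the MAX_USERNAME_SIZE value are illustrative, not the kernel definitions:

#include <stdlib.h>
#include <string.h>

#define MAX_USERNAME_SIZE 256           /* illustrative; the kernel defines its own */

struct session {
        char *user_name;                /* NULL for an anonymous (null-user) session */
};

/* NULL-safe match, mirroring the new check in cifs_find_smb_ses() */
static int session_matches_user(const struct session *ses, const char *wanted)
{
        if (ses->user_name == NULL)
                return 0;               /* anonymous sessions never match a named user */
        return strncmp(ses->user_name, wanted, MAX_USERNAME_SIZE) == 0;
}

/* Copy by duplication, mirroring the kstrdup() in cifs_get_smb_ses() */
static int session_set_user(struct session *ses, const char *name)
{
        ses->user_name = strdup(name);
        return ses->user_name ? 0 : -1;
}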
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c27d236738fc..faf59529e847 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -575,8 +575,10 @@ reopen_error_exit:
int cifs_close(struct inode *inode, struct file *file)
{
- cifsFileInfo_put(file->private_data);
- file->private_data = NULL;
+ if (file->private_data != NULL) {
+ cifsFileInfo_put(file->private_data);
+ file->private_data = NULL;
+ }
/* return code from the ->release op is always ignored */
return 0;
@@ -970,6 +972,9 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
total_written += bytes_written) {
rc = -EAGAIN;
while (rc == -EAGAIN) {
+ struct kvec iov[2];
+ unsigned int len;
+
if (open_file->invalidHandle) {
/* we could deadlock if we called
filemap_fdatawait from here so tell
@@ -979,31 +984,14 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
if (rc != 0)
break;
}
- if (experimEnabled || (pTcon->ses->server &&
- ((pTcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- == 0))) {
- struct kvec iov[2];
- unsigned int len;
-
- len = min((size_t)cifs_sb->wsize,
- write_size - total_written);
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = (char *)write_data +
- total_written;
- iov[1].iov_len = len;
- rc = CIFSSMBWrite2(xid, pTcon,
- open_file->netfid, len,
- *poffset, &bytes_written,
- iov, 1, 0);
- } else
- rc = CIFSSMBWrite(xid, pTcon,
- open_file->netfid,
- min_t(const int, cifs_sb->wsize,
- write_size - total_written),
- *poffset, &bytes_written,
- write_data + total_written,
- NULL, 0);
+
+ len = min((size_t)cifs_sb->wsize,
+ write_size - total_written);
+ /* iov[0] is reserved for smb header */
+ iov[1].iov_base = (char *)write_data + total_written;
+ iov[1].iov_len = len;
+ rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
+ *poffset, &bytes_written, iov, 1, 0);
}
if (rc || (bytes_written == 0)) {
if (total_written)
@@ -1240,12 +1228,6 @@ static int cifs_writepages(struct address_space *mapping,
}
tcon = tlink_tcon(open_file->tlink);
- if (!experimEnabled && tcon->ses->server->secMode &
- (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- cifsFileInfo_put(open_file);
- kfree(iov);
- return generic_writepages(mapping, wbc);
- }
cifsFileInfo_put(open_file);
xid = GetXid();
@@ -1980,6 +1962,24 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
return total_read;
}
+/*
+ * If the page is mmap'ed into a process' page tables, then we need to make
+ * sure that it doesn't change while being written back.
+ */
+static int
+cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+
+ lock_page(page);
+ return VM_FAULT_LOCKED;
+}
+
+static struct vm_operations_struct cifs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = cifs_page_mkwrite,
+};
+
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
@@ -1991,6 +1991,8 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
cifs_invalidate_mapping(inode);
rc = generic_file_mmap(file, vma);
+ if (rc == 0)
+ vma->vm_ops = &cifs_file_vm_ops;
FreeXid(xid);
return rc;
}
@@ -2007,6 +2009,8 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc;
}
rc = generic_file_mmap(file, vma);
+ if (rc == 0)
+ vma->vm_ops = &cifs_file_vm_ops;
FreeXid(xid);
return rc;
}
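The file.c hunks above install a ->page_mkwrite handler so that a page dirtied through a shared mapping is locked before the write proceeds, keeping it stable during writeback. The sketch below mirrors only the calls visible in the hunks (lock_page, VM_FAULT_LOCKED, filemap_fault, generic_file_mmap); the example_* names are illustrative and this is not a standalone program:

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = vmf->page;

        lock_page(page);                /* keep the page stable while it is made writable */
        return VM_FAULT_LOCKED;         /* tell the fault path it is already locked */
}

static struct vm_operations_struct example_vm_ops = {
        .fault        = filemap_fault,
        .page_mkwrite = example_page_mkwrite,
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc = generic_file_mmap(file, vma);

        if (rc == 0)
                vma->vm_ops = &example_vm_ops;  /* hook in only after a successful mmap */
        return rc;
}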
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e8804d373404..ce417a9764a3 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -239,7 +239,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
if (rc != 0)
return rc;
- if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
CIFSSMBClose(xid, tcon, netfid);
/* it's not a symlink */
return -EINVAL;
@@ -316,7 +316,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
if (rc != 0)
goto out;
- if (file_info.EndOfFile != CIFS_MF_SYMLINK_FILE_SIZE) {
+ if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
CIFSSMBClose(xid, pTcon, netfid);
/* it's not a symlink */
goto out;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 2a930a752a78..0c684ae4c071 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -100,6 +100,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
memset(buf_to_free->password, 0, strlen(buf_to_free->password));
kfree(buf_to_free->password);
}
+ kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kfree(buf_to_free);
}
@@ -520,7 +521,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
- if (pSMBr->ByteCount > sizeof(struct file_notify_information)) {
+ if (get_bcc_le(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
pnotify = (struct file_notify_information *)
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 16765703131b..f6728eb6f4b9 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -219,12 +219,12 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
bcc_ptr++;
} */
/* copy user */
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
} else {
- bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
@@ -244,12 +244,11 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
- if (ses->userName == NULL) {
- /* BB what about null user mounts - check that we do this BB */
- } else {
- strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
- }
- bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
+ if (ses->user_name != NULL)
+ strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+ /* else null user mount */
+
+ bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
@@ -405,8 +404,8 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
- tioffset = cpu_to_le16(pblob->TargetInfoArray.BufferOffset);
- tilen = cpu_to_le16(pblob->TargetInfoArray.Length);
+ tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
+ tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tilen) {
ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
@@ -523,14 +522,14 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
tmp += len;
}
- if (ses->userName == NULL) {
+ if (ses->user_name == NULL) {
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
sec_blob->UserName.Length = 0;
sec_blob->UserName.MaximumLength = 0;
tmp += 2;
} else {
int len;
- len = cifs_strtoUCS((__le16 *)tmp, ses->userName,
+ len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
MAX_USERNAME_SIZE, nls_cp);
len *= 2; /* unicode is 2 bytes each */
sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
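The decode_ntlmssp_challenge fix above corrects the conversion direction: the TargetInfoArray fields arrive little-endian from the wire, so they must be converted with le32_to_cpu()/le16_to_cpu(), not cpu_to_le16(). A small runnable userspace illustration of the same rule, assuming the glibc/BSD <endian.h> helpers and an illustrative wire_blob layout:

#include <stdint.h>
#include <stdio.h>
#include <endian.h>                     /* htole32/le32toh etc. (glibc/BSD) */

struct wire_blob {
        uint32_t buffer_offset;         /* little-endian on the wire */
        uint16_t length;                /* little-endian on the wire */
};

int main(void)
{
        struct wire_blob b = {
                .buffer_offset = htole32(0x30),
                .length        = htole16(24),
        };

        /* Reading wire data: convert *from* little-endian to host order. */
        uint32_t tioffset = le32toh(b.buffer_offset);
        uint16_t tilen    = le16toh(b.length);

        printf("offset=%u len=%u\n", (unsigned)tioffset, (unsigned)tilen);
        return 0;
}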
diff --git a/fs/dcache.c b/fs/dcache.c
index ad25c4cec7d5..22a0ef41bad1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -99,12 +99,9 @@ static struct kmem_cache *dentry_cache __read_mostly;
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;
-struct dcache_hash_bucket {
- struct hlist_bl_head head;
-};
-static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
+static struct hlist_bl_head *dentry_hashtable __read_mostly;
-static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
+static inline struct hlist_bl_head *d_hash(struct dentry *parent,
unsigned long hash)
{
hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
@@ -112,16 +109,6 @@ static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
return dentry_hashtable + (hash & D_HASHMASK);
}
-static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
-{
- bit_spin_lock(0, (unsigned long *)&b->head.first);
-}
-
-static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
-{
- __bit_spin_unlock(0, (unsigned long *)&b->head.first);
-}
-
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
.age_limit = 45,
@@ -167,8 +154,8 @@ static void d_free(struct dentry *dentry)
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
- /* if dentry was never inserted into hash, immediate free is OK */
- if (hlist_bl_unhashed(&dentry->d_hash))
+ /* if dentry was never visible to RCU, immediate free is OK */
+ if (!(dentry->d_flags & DCACHE_RCUACCESS))
__d_free(&dentry->d_u.d_rcu);
else
call_rcu(&dentry->d_u.d_rcu, __d_free);
@@ -330,28 +317,19 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
*/
void __d_drop(struct dentry *dentry)
{
- if (!(dentry->d_flags & DCACHE_UNHASHED)) {
- if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) {
- bit_spin_lock(0,
- (unsigned long *)&dentry->d_sb->s_anon.first);
- dentry->d_flags |= DCACHE_UNHASHED;
- hlist_bl_del_init(&dentry->d_hash);
- __bit_spin_unlock(0,
- (unsigned long *)&dentry->d_sb->s_anon.first);
- } else {
- struct dcache_hash_bucket *b;
+ if (!d_unhashed(dentry)) {
+ struct hlist_bl_head *b;
+ if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
+ b = &dentry->d_sb->s_anon;
+ else
b = d_hash(dentry->d_parent, dentry->d_name.hash);
- spin_lock_bucket(b);
- /*
- * We may not actually need to put DCACHE_UNHASHED
- * manipulations under the hash lock, but follow
- * the principle of least surprise.
- */
- dentry->d_flags |= DCACHE_UNHASHED;
- hlist_bl_del_rcu(&dentry->d_hash);
- spin_unlock_bucket(b);
- dentry_rcuwalk_barrier(dentry);
- }
+
+ hlist_bl_lock(b);
+ __hlist_bl_del(&dentry->d_hash);
+ dentry->d_hash.pprev = NULL;
+ hlist_bl_unlock(b);
+
+ dentry_rcuwalk_barrier(dentry);
}
}
EXPORT_SYMBOL(__d_drop);
@@ -1304,7 +1282,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
dname[name->len] = 0;
dentry->d_count = 1;
- dentry->d_flags = DCACHE_UNHASHED;
+ dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
dentry->d_inode = NULL;
@@ -1606,10 +1584,9 @@ struct dentry *d_obtain_alias(struct inode *inode)
tmp->d_inode = inode;
tmp->d_flags |= DCACHE_DISCONNECTED;
list_add(&tmp->d_alias, &inode->i_dentry);
- bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
- tmp->d_flags &= ~DCACHE_UNHASHED;
+ hlist_bl_lock(&tmp->d_sb->s_anon);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
- __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first);
+ hlist_bl_unlock(&tmp->d_sb->s_anon);
spin_unlock(&tmp->d_lock);
spin_unlock(&inode->i_lock);
security_d_instantiate(tmp, inode);
@@ -1789,7 +1766,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct dcache_hash_bucket *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *dentry;
@@ -1813,7 +1790,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
*
* See Documentation/filesystems/path-lookup.txt for more details.
*/
- hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
struct inode *i;
const char *tname;
int tlen;
@@ -1908,7 +1885,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
- struct dcache_hash_bucket *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *found = NULL;
struct dentry *dentry;
@@ -1935,7 +1912,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
*/
rcu_read_lock();
- hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) {
+ hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
const char *tname;
int tlen;
@@ -2086,13 +2063,13 @@ again:
}
EXPORT_SYMBOL(d_delete);
-static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b)
+static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
BUG_ON(!d_unhashed(entry));
- spin_lock_bucket(b);
- entry->d_flags &= ~DCACHE_UNHASHED;
- hlist_bl_add_head_rcu(&entry->d_hash, &b->head);
- spin_unlock_bucket(b);
+ hlist_bl_lock(b);
+ entry->d_flags |= DCACHE_RCUACCESS;
+ hlist_bl_add_head_rcu(&entry->d_hash, b);
+ hlist_bl_unlock(b);
}
static void _d_rehash(struct dentry * entry)
@@ -2131,7 +2108,7 @@ EXPORT_SYMBOL(d_rehash);
*/
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
- BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
+ BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
spin_lock(&dentry->d_lock);
@@ -3025,7 +3002,7 @@ static void __init dcache_init_early(void)
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
- sizeof(struct dcache_hash_bucket),
+ sizeof(struct hlist_bl_head),
dhash_entries,
13,
HASH_EARLY,
@@ -3034,7 +3011,7 @@ static void __init dcache_init_early(void)
0);
for (loop = 0; loop < (1 << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
@@ -3057,7 +3034,7 @@ static void __init dcache_init(void)
dentry_hashtable =
alloc_large_system_hash("Dentry cache",
- sizeof(struct dcache_hash_bucket),
+ sizeof(struct hlist_bl_head),
dhash_entries,
13,
0,
@@ -3066,7 +3043,7 @@ static void __init dcache_init(void)
0);
for (loop = 0; loop < (1 << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head);
+ INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
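The dcache.c changes above drop the dcache_hash_bucket wrapper and the open-coded bit_spin_lock() calls in favour of hlist_bl_head plus the hlist_bl_lock()/hlist_bl_unlock() helpers, which use bit 0 of the bucket's first pointer as the lock. A minimal sketch of that bucket-insert pattern, using only the helpers visible in the hunks (example_hash_insert is an illustrative name):

static void example_hash_insert(struct hlist_bl_head *b, struct hlist_bl_node *n)
{
        hlist_bl_lock(b);               /* bit-spinlock on bit 0 of b->first */
        hlist_bl_add_head_rcu(n, b);    /* RCU readers may still walk the chain */
        hlist_bl_unlock(b);
}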
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index d2a70a4561f9..b8d5c8091024 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1452,6 +1452,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
+{
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ struct ecryptfs_crypt_stat *crypt_stat;
+ u64 file_size;
+
+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
+ mount_crypt_stat =
+ &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
+ if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
+ file_size = i_size_read(ecryptfs_inode_to_lower(inode));
+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
+ file_size += crypt_stat->metadata_size;
+ } else
+ file_size = get_unaligned_be64(page_virt);
+ i_size_write(inode, (loff_t)file_size);
+ crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
+}
+
/**
* ecryptfs_read_headers_virt
* @page_virt: The virtual address into which to read the headers
@@ -1482,6 +1501,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
rc = -EINVAL;
goto out;
}
+ if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
&bytes_read);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index bd3cafd0949d..e70282775e2c 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -269,6 +269,7 @@ struct ecryptfs_crypt_stat {
#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800
#define ECRYPTFS_ENCFN_USE_FEK 0x00001000
#define ECRYPTFS_UNLINK_SIGS 0x00002000
+#define ECRYPTFS_I_SIZE_INITIALIZED 0x00004000
u32 flags;
unsigned int file_version;
size_t iv_bytes;
@@ -295,6 +296,8 @@ struct ecryptfs_crypt_stat {
struct ecryptfs_inode_info {
struct inode vfs_inode;
struct inode *wii_inode;
+ struct mutex lower_file_mutex;
+ atomic_t lower_file_count;
struct file *lower_file;
struct ecryptfs_crypt_stat crypt_stat;
};
@@ -626,6 +629,7 @@ struct ecryptfs_open_req {
int ecryptfs_interpose(struct dentry *hidden_dentry,
struct dentry *this_dentry, struct super_block *sb,
u32 flags);
+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
struct dentry *lower_dentry,
struct inode *ecryptfs_dir_inode);
@@ -757,7 +761,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
struct dentry *lower_dentry,
struct vfsmount *lower_mnt,
const struct cred *cred);
-int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry);
+int ecryptfs_get_lower_file(struct dentry *ecryptfs_dentry);
+void ecryptfs_put_lower_file(struct inode *inode);
int
ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
size_t *packet_size,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index cedc913d11ba..566e5472f78c 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -191,10 +191,10 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
- rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
- "the persistent file for the dentry with name "
+ "the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out_free;
@@ -202,9 +202,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE)
== O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) {
rc = -EPERM;
- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ printk(KERN_WARNING "%s: Lower file is RO; eCryptfs "
"file must hence be opened RO\n", __func__);
- goto out_free;
+ goto out_put;
}
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
@@ -232,10 +232,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
"Plaintext passthrough mode is not "
"enabled; returning -EIO\n");
mutex_unlock(&crypt_stat->cs_mutex);
- goto out_free;
+ goto out_put;
}
rc = 0;
- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+ | ECRYPTFS_ENCRYPTED);
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
@@ -245,6 +246,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
"[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
(unsigned long long)i_size_read(inode));
goto out;
+out_put:
+ ecryptfs_put_lower_file(inode);
out_free:
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
@@ -254,17 +257,13 @@ out:
static int ecryptfs_flush(struct file *file, fl_owner_t td)
{
- int rc = 0;
- struct file *lower_file = NULL;
-
- lower_file = ecryptfs_file_to_lower(file);
- if (lower_file->f_op && lower_file->f_op->flush)
- rc = lower_file->f_op->flush(lower_file, td);
- return rc;
+ return file->f_mode & FMODE_WRITE
+ ? filemap_write_and_wait(file->f_mapping) : 0;
}
static int ecryptfs_release(struct inode *inode, struct file *file)
{
+ ecryptfs_put_lower_file(inode);
kmem_cache_free(ecryptfs_file_info_cache,
ecryptfs_file_to_private(file));
return 0;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index f99051b7adab..4d4cc6a90cd5 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -168,19 +168,18 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
"context; rc = [%d]\n", rc);
goto out;
}
- rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
- "the persistent file for the dentry with name "
+ "the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out;
}
rc = ecryptfs_write_metadata(ecryptfs_dentry);
- if (rc) {
+ if (rc)
printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
- goto out;
- }
+ ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
out:
return rc;
}
@@ -226,11 +225,9 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
struct dentry *lower_dir_dentry;
struct vfsmount *lower_mnt;
struct inode *lower_inode;
- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
struct ecryptfs_crypt_stat *crypt_stat;
char *page_virt = NULL;
- u64 file_size;
- int rc = 0;
+ int put_lower = 0, rc = 0;
lower_dir_dentry = lower_dentry->d_parent;
lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
@@ -277,14 +274,15 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
rc = -ENOMEM;
goto out;
}
- rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
+ rc = ecryptfs_get_lower_file(ecryptfs_dentry);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
- "the persistent file for the dentry with name "
+ "the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out_free_kmem;
}
+ put_lower = 1;
crypt_stat = &ecryptfs_inode_to_private(
ecryptfs_dentry->d_inode)->crypt_stat;
/* TODO: lock for crypt_stat comparison */
@@ -302,18 +300,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
}
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
}
- mount_crypt_stat = &ecryptfs_superblock_to_private(
- ecryptfs_dentry->d_sb)->mount_crypt_stat;
- if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
- if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- file_size = (crypt_stat->metadata_size
- + i_size_read(lower_dentry->d_inode));
- else
- file_size = i_size_read(lower_dentry->d_inode);
- } else {
- file_size = get_unaligned_be64(page_virt);
- }
- i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
out_free_kmem:
kmem_cache_free(ecryptfs_header_cache_2, page_virt);
goto out;
@@ -322,6 +309,8 @@ out_put:
mntput(lower_mnt);
d_drop(ecryptfs_dentry);
out:
+ if (put_lower)
+ ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
return rc;
}
@@ -538,8 +527,6 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
dget(lower_dentry);
rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
dput(lower_dentry);
- if (!rc)
- d_delete(lower_dentry);
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
unlock_dir(lower_dir_dentry);
@@ -610,8 +597,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
out_lock:
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
- dput(lower_new_dentry->d_parent);
- dput(lower_old_dentry->d_parent);
+ dput(lower_new_dir_dentry);
+ dput(lower_old_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
return rc;
@@ -759,8 +746,11 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
if (unlikely((ia->ia_size == i_size))) {
lower_ia->ia_valid &= ~ATTR_SIZE;
- goto out;
+ return 0;
}
+ rc = ecryptfs_get_lower_file(dentry);
+ if (rc)
+ return rc;
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
/* Switch on growing or shrinking file */
if (ia->ia_size > i_size) {
@@ -838,6 +828,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
lower_ia->ia_valid &= ~ATTR_SIZE;
}
out:
+ ecryptfs_put_lower_file(inode);
return rc;
}
@@ -913,7 +904,13 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
+ rc = ecryptfs_get_lower_file(dentry);
+ if (rc) {
+ mutex_unlock(&crypt_stat->cs_mutex);
+ goto out;
+ }
rc = ecryptfs_read_metadata(dentry);
+ ecryptfs_put_lower_file(inode);
if (rc) {
if (!(mount_crypt_stat->flags
& ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
@@ -927,10 +924,17 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
goto out;
}
rc = 0;
- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+ | ECRYPTFS_ENCRYPTED);
}
}
mutex_unlock(&crypt_stat->cs_mutex);
+ if (S_ISREG(inode->i_mode)) {
+ rc = filemap_write_and_wait(inode->i_mapping);
+ if (rc)
+ goto out;
+ fsstack_copy_attr_all(inode, lower_inode);
+ }
memcpy(&lower_ia, ia, sizeof(lower_ia));
if (ia->ia_valid & ATTR_FILE)
lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 0851ab6980f5..69f994a7d524 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -44,7 +44,7 @@ static struct task_struct *ecryptfs_kthread;
* @ignored: ignored
*
* The eCryptfs kernel thread that has the responsibility of getting
- * the lower persistent file with RW permissions.
+ * the lower file with RW permissions.
*
* Returns zero on success; non-zero otherwise
*/
@@ -141,8 +141,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
int rc = 0;
/* Corresponding dput() and mntput() are done when the
- * persistent file is fput() when the eCryptfs inode is
- * destroyed. */
+ * lower file is fput() when all eCryptfs files for the inode are
+ * released. */
dget(lower_dentry);
mntget(lower_mnt);
flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index fdb2eb0ad09e..89b93389af8e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -96,7 +96,7 @@ void __ecryptfs_printk(const char *fmt, ...)
}
/**
- * ecryptfs_init_persistent_file
+ * ecryptfs_init_lower_file
* @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with
* the lower dentry and the lower mount set
*
@@ -104,42 +104,70 @@ void __ecryptfs_printk(const char *fmt, ...)
* inode. All I/O operations to the lower inode occur through that
* file. When the first eCryptfs dentry that interposes with the first
* lower dentry for that inode is created, this function creates the
- * persistent file struct and associates it with the eCryptfs
- * inode. When the eCryptfs inode is destroyed, the file is closed.
+ * lower file struct and associates it with the eCryptfs
+ * inode. When all eCryptfs files associated with the inode are released, the
+ * file is closed.
*
- * The persistent file will be opened with read/write permissions, if
+ * The lower file will be opened with read/write permissions, if
* possible. Otherwise, it is opened read-only.
*
- * This function does nothing if a lower persistent file is already
+ * This function does nothing if a lower file is already
* associated with the eCryptfs inode.
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
+static int ecryptfs_init_lower_file(struct dentry *dentry,
+ struct file **lower_file)
{
const struct cred *cred = current_cred();
- struct ecryptfs_inode_info *inode_info =
- ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
- int rc = 0;
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ int rc;
- if (!inode_info->lower_file) {
- struct dentry *lower_dentry;
- struct vfsmount *lower_mnt =
- ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
+ rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt,
+ cred);
+ if (rc) {
+ printk(KERN_ERR "Error opening lower file "
+ "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
+ "rc = [%d]\n", lower_dentry, lower_mnt, rc);
+ (*lower_file) = NULL;
+ }
+ return rc;
+}
- lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
- rc = ecryptfs_privileged_open(&inode_info->lower_file,
- lower_dentry, lower_mnt, cred);
- if (rc) {
- printk(KERN_ERR "Error opening lower persistent file "
- "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
- "rc = [%d]\n", lower_dentry, lower_mnt, rc);
- inode_info->lower_file = NULL;
- }
+int ecryptfs_get_lower_file(struct dentry *dentry)
+{
+ struct ecryptfs_inode_info *inode_info =
+ ecryptfs_inode_to_private(dentry->d_inode);
+ int count, rc = 0;
+
+ mutex_lock(&inode_info->lower_file_mutex);
+ count = atomic_inc_return(&inode_info->lower_file_count);
+ if (WARN_ON_ONCE(count < 1))
+ rc = -EINVAL;
+ else if (count == 1) {
+ rc = ecryptfs_init_lower_file(dentry,
+ &inode_info->lower_file);
+ if (rc)
+ atomic_set(&inode_info->lower_file_count, 0);
}
+ mutex_unlock(&inode_info->lower_file_mutex);
return rc;
}
+void ecryptfs_put_lower_file(struct inode *inode)
+{
+ struct ecryptfs_inode_info *inode_info;
+
+ inode_info = ecryptfs_inode_to_private(inode);
+ if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
+ &inode_info->lower_file_mutex)) {
+ fput(inode_info->lower_file);
+ inode_info->lower_file = NULL;
+ mutex_unlock(&inode_info->lower_file_mutex);
+ }
+}
+
static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb)
{
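ecryptfs_get_lower_file()/ecryptfs_put_lower_file() above turn the per-inode lower file into a reference-counted resource: the first getter opens it, the last putter fput()s it, and a mutex serializes the open/close transitions. A userspace model of the same scheme, with illustrative names (shared_file, shared_get, shared_put) and a FILE * standing in for the lower struct file:

#include <pthread.h>
#include <stdio.h>

struct shared_file {
        pthread_mutex_t lock;           /* serializes open/close transitions */
        int             count;          /* number of active users */
        FILE           *fp;             /* stands in for the lower struct file */
};

static int shared_get(struct shared_file *s, const char *path)
{
        int rc = 0;

        pthread_mutex_lock(&s->lock);
        if (s->count++ == 0) {          /* first user opens the file */
                s->fp = fopen(path, "r+");
                if (!s->fp) {
                        s->count = 0;
                        rc = -1;
                }
        }
        pthread_mutex_unlock(&s->lock);
        return rc;
}

static void shared_put(struct shared_file *s)
{
        pthread_mutex_lock(&s->lock);
        if (--s->count == 0) {          /* last user closes it */
                fclose(s->fp);
                s->fp = NULL;
        }
        pthread_mutex_unlock(&s->lock);
}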
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index bacc882e1ae4..245b517bf1b6 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -55,6 +55,8 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
if (unlikely(!inode_info))
goto out;
ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
+ mutex_init(&inode_info->lower_file_mutex);
+ atomic_set(&inode_info->lower_file_count, 0);
inode_info->lower_file = NULL;
inode = &inode_info->vfs_inode;
out:
@@ -77,8 +79,7 @@ static void ecryptfs_i_callback(struct rcu_head *head)
*
* This is used during the final destruction of the inode. All
* allocation of memory related to the inode, including allocated
- * memory in the crypt_stat struct, will be released here. This
- * function also fput()'s the persistent file for the lower inode.
+ * memory in the crypt_stat struct, will be released here.
* There should be no chance that this deallocation will be missed.
*/
static void ecryptfs_destroy_inode(struct inode *inode)
@@ -86,16 +87,7 @@ static void ecryptfs_destroy_inode(struct inode *inode)
struct ecryptfs_inode_info *inode_info;
inode_info = ecryptfs_inode_to_private(inode);
- if (inode_info->lower_file) {
- struct dentry *lower_dentry =
- inode_info->lower_file->f_dentry;
-
- BUG_ON(!lower_dentry);
- if (lower_dentry->d_inode) {
- fput(inode_info->lower_file);
- inode_info->lower_file = NULL;
- }
- }
+ BUG_ON(inode_info->lower_file);
ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
call_rcu(&inode->i_rcu, ecryptfs_i_callback);
}
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index b5c2f3c97d71..68b2e43d7c35 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3291,7 +3291,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
if (ext3_should_journal_data(inode))
ret = 3 * (bpp + indirects) + 2;
else
- ret = 2 * (bpp + indirects) + 2;
+ ret = 2 * (bpp + indirects) + indirects + 2;
#ifdef CONFIG_QUOTA
/* We know that structure was already allocated during dquot_initialize so
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index e25e99bf7ee1..d0f53538a57f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -86,8 +86,8 @@
#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
- * allocated so we need to update only inode+data */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
+ * allocated so we need to update only the data block */
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
* but inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 4673bc05274f..e9473cbe80df 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -125,9 +125,11 @@ extern int ext4_flush_completed_IO(struct inode *inode)
* the parent directory's parent as well, and so on recursively, if
* they are also freshly created.
*/
-static void ext4_sync_parent(struct inode *inode)
+static int ext4_sync_parent(struct inode *inode)
{
+ struct writeback_control wbc;
struct dentry *dentry = NULL;
+ int ret = 0;
while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
@@ -136,8 +138,17 @@ static void ext4_sync_parent(struct inode *inode)
if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
break;
inode = dentry->d_parent->d_inode;
- sync_mapping_buffers(inode->i_mapping);
+ ret = sync_mapping_buffers(inode->i_mapping);
+ if (ret)
+ break;
+ memset(&wbc, 0, sizeof(wbc));
+ wbc.sync_mode = WB_SYNC_ALL;
+ wbc.nr_to_write = 0; /* only write out the inode */
+ ret = sync_inode(inode, &wbc);
+ if (ret)
+ break;
}
+ return ret;
}
/*
@@ -176,7 +187,7 @@ int ext4_sync_file(struct file *file, int datasync)
if (!journal) {
ret = generic_file_fsync(file, datasync);
if (!ret && !list_empty(&inode->i_dentry))
- ext4_sync_parent(inode);
+ ret = ext4_sync_parent(inode);
goto out;
}
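ext4_sync_parent() above now writes the newly linked parent directory's inode synchronously, not just its mapped buffers, and propagates errors back to fsync(). The sketch below mirrors only the calls visible in the hunk (sync_mapping_buffers, sync_inode, WB_SYNC_ALL); example_sync_dir_inode is an illustrative name and this is not a standalone program:

static int example_sync_dir_inode(struct inode *dir)
{
        struct writeback_control wbc;
        int ret;

        ret = sync_mapping_buffers(dir->i_mapping);     /* directory blocks */
        if (ret)
                return ret;

        memset(&wbc, 0, sizeof(wbc));
        wbc.sync_mode = WB_SYNC_ALL;                    /* wait for the write */
        wbc.nr_to_write = 0;                            /* only the inode itself */
        return sync_inode(dir, &wbc);
}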
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ad8e303c0d29..f2fa5e8a582c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2502,6 +2502,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
* for partial write.
*/
set_buffer_new(bh);
+ set_buffer_mapped(bh);
}
return 0;
}
@@ -4429,8 +4430,8 @@ void ext4_truncate(struct inode *inode)
Indirect chain[4];
Indirect *partial;
__le32 nr = 0;
- int n;
- ext4_lblk_t last_block;
+ int n = 0;
+ ext4_lblk_t last_block, max_block;
unsigned blocksize = inode->i_sb->s_blocksize;
trace_ext4_truncate_enter(inode);
@@ -4455,14 +4456,18 @@ void ext4_truncate(struct inode *inode)
last_block = (inode->i_size + blocksize-1)
>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+ max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+ >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
if (inode->i_size & (blocksize - 1))
if (ext4_block_truncate_page(handle, mapping, inode->i_size))
goto out_stop;
- n = ext4_block_to_path(inode, last_block, offsets, NULL);
- if (n == 0)
- goto out_stop; /* error */
+ if (last_block != max_block) {
+ n = ext4_block_to_path(inode, last_block, offsets, NULL);
+ if (n == 0)
+ goto out_stop; /* error */
+ }
/*
* OK. This truncate is going to happen. We add the inode to the
@@ -4493,7 +4498,13 @@ void ext4_truncate(struct inode *inode)
*/
ei->i_disksize = inode->i_size;
- if (n == 1) { /* direct blocks */
+ if (last_block == max_block) {
+ /*
+ * It is unnecessary to free any data blocks if last_block is
+ * equal to the indirect block limit.
+ */
+ goto out_unlock;
+ } else if (n == 1) { /* direct blocks */
ext4_free_data(handle, inode, NULL, i_data+offsets[0],
i_data + EXT4_NDIR_BLOCKS);
goto do_indirects;
@@ -4553,6 +4564,7 @@ do_indirects:
;
}
+out_unlock:
up_write(&ei->i_data_sem);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
@@ -5398,13 +5410,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
/* if nrblocks are contiguous */
if (chunk) {
/*
- * With N contiguous data blocks, it need at most
- * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks
- * 2 dindirect blocks
- * 1 tindirect block
+ * With N contiguous data blocks, we need at most
+ * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
*/
- indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb);
- return indirects + 3;
+ return DIV_ROUND_UP(nrblocks,
+ EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4;
}
/*
* if nrblocks are not contiguous, worse case, each block touch
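The ext4_indirect_trans_blocks() change above rounds the indirect-block count up and reserves one extra indirect block plus two dindirect and one tindirect block, as the updated comment states. A small runnable example with illustrative numbers showing how the bound changes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int addr_per_block = 1024;     /* 4 KiB blocks, 4-byte block addresses */
        unsigned int nrblocks = 1500;           /* contiguous data blocks in the chunk */

        /* 1500 blocks can span two indirect blocks, so the old bound was one short. */
        unsigned int old_bound = nrblocks / addr_per_block + 3;                 /* = 4 */
        unsigned int new_bound = DIV_ROUND_UP(nrblocks, addr_per_block) + 4;    /* = 6 */

        printf("old=%u new=%u\n", old_bound, new_bound);
        return 0;
}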
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 056474b7b8e0..8553dfb310af 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -242,27 +242,44 @@ static void ext4_put_nojournal(handle_t *handle)
* journal_end calls result in the superblock being marked dirty, so
* that sync() will call the filesystem's write_super callback if
* appropriate.
+ *
+ * To avoid holding j_barrier for the whole time userspace keeps the
+ * filesystem frozen (freeze()), ext4 prevents a new handle from being
+ * started via s_frozen, which lives in an upper layer.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
journal_t *journal;
+ handle_t *handle;
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
- vfs_check_frozen(sb, SB_FREEZE_TRANS);
- /* Special case here: if the journal has aborted behind our
- * backs (eg. EIO in the commit thread), then we still need to
- * take the FS itself readonly cleanly. */
journal = EXT4_SB(sb)->s_journal;
- if (journal) {
- if (is_journal_aborted(journal)) {
- ext4_abort(sb, "Detected aborted journal");
- return ERR_PTR(-EROFS);
- }
- return jbd2_journal_start(journal, nblocks);
+ handle = ext4_journal_current_handle();
+
+ /*
+ * If a handle has been started, it should be allowed to
+ * finish, otherwise deadlock could happen between freeze
+ * and others (e.g. truncate) due to the restart of the
+ * journal handle if the filesystem is frozen and active
+ * handles are not stopped.
+ */
+ if (!handle)
+ vfs_check_frozen(sb, SB_FREEZE_TRANS);
+
+ if (!journal)
+ return ext4_get_nojournal();
+ /*
+ * Special case here: if the journal has aborted behind our
+ * backs (eg. EIO in the commit thread), then we still need to
+ * take the FS itself readonly cleanly.
+ */
+ if (is_journal_aborted(journal)) {
+ ext4_abort(sb, "Detected aborted journal");
+ return ERR_PTR(-EROFS);
}
- return ext4_get_nojournal();
+ return jbd2_journal_start(journal, nblocks);
}
/*
@@ -2975,6 +2992,12 @@ static int ext4_register_li_request(struct super_block *sb,
mutex_unlock(&ext4_li_info->li_list_mtx);
sbi->s_li_request = elr;
+ /*
+ * Set elr to NULL here since it has been inserted into
+ * the request_list, and its removal and freeing are
+ * handled by ext4_clear_request_list from now on.
+ */
+ elr = NULL;
if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) {
ret = ext4_run_lazyinit_thread();
@@ -3385,6 +3408,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
+ init_timer(&sbi->s_err_report);
+ sbi->s_err_report.function = print_daily_error_info;
+ sbi->s_err_report.data = (unsigned long) sb;
+
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
@@ -3646,9 +3673,6 @@ no_journal:
"Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
- init_timer(&sbi->s_err_report);
- sbi->s_err_report.function = print_daily_error_info;
- sbi->s_err_report.data = (unsigned long) sb;
if (es->s_error_count)
mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -3672,6 +3696,7 @@ failed_mount_wq:
sbi->s_journal = NULL;
}
failed_mount3:
+ del_timer(&sbi->s_err_report);
if (sbi->s_flex_groups) {
if (is_vmalloc_addr(sbi->s_flex_groups))
vfree(sbi->s_flex_groups);
@@ -4138,6 +4163,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
/*
* LVM calls this function before a (read-only) snapshot is created. This
* gives us a chance to flush the journal completely and mark the fs clean.
+ *
+ * Note that this function alone cannot bring the filesystem into a clean
+ * state, because ext4 prevents a new handle from being started by
+ * @sb->s_frozen, which is held in an upper layer. It therefore needs help
+ * from that upper layer.
*/
static int ext4_freeze(struct super_block *sb)
{
@@ -4614,11 +4644,24 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
static int ext4_quota_off(struct super_block *sb, int type)
{
+ struct inode *inode = sb_dqopt(sb)->files[type];
+ handle_t *handle;
+
/* Force all delayed allocation blocks to be allocated.
* Caller already holds s_umount sem */
if (test_opt(sb, DELALLOC))
sync_filesystem(sb);
+ /* Update modification times of quota files when userspace can
+ * start looking at them */
+ handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle))
+ goto out;
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+
+out:
return dquot_quota_off(sb, type);
}
@@ -4714,9 +4757,8 @@ out:
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
+ ext4_mark_inode_dirty(handle, inode);
}
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- ext4_mark_inode_dirty(handle, inode);
mutex_unlock(&inode->i_mutex);
return len;
}
diff --git a/fs/fhandle.c b/fs/fhandle.c
index bf93ad2bee07..6b088641f5bf 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -7,6 +7,7 @@
#include <linux/exportfs.h>
#include <linux/fs_struct.h>
#include <linux/fsnotify.h>
+#include <linux/personality.h>
#include <asm/uaccess.h>
#include "internal.h"
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 751d6b255a12..0845f84f2a5f 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs)
*tmp = fs->next;
fs->next = NULL;
write_unlock(&file_systems_lock);
+ synchronize_rcu();
return 0;
}
tmp = &(*tmp)->next;
}
write_unlock(&file_systems_lock);
- synchronize_rcu();
-
return -EINVAL;
}
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index c71995b111bf..0f5c4f9d5d62 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -884,8 +884,8 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
}
brelse(dibh);
- gfs2_trans_end(sdp);
failed:
+ gfs2_trans_end(sdp);
if (al) {
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 5c356d09c321..f789c5732b7c 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1506,7 +1506,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
- be64_to_cpu(dent->de_inum.no_formal_ino));
+ be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index b2682e073eee..e48310885c48 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return generic_file_aio_write(iocb, iov, nr_segs, pos);
}
-static void empty_write_end(struct page *page, unsigned from,
- unsigned to)
+static int empty_write_end(struct page *page, unsigned from,
+ unsigned to, int mode)
{
- struct gfs2_inode *ip = GFS2_I(page->mapping->host);
+ struct inode *inode = page->mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct buffer_head *bh;
+ unsigned offset, blksize = 1 << inode->i_blkbits;
+ pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
zero_user(page, from, to-from);
mark_page_accessed(page);
- if (!gfs2_is_writeback(ip))
- gfs2_page_add_databufs(ip, page, from, to);
+ if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
+ if (!gfs2_is_writeback(ip))
+ gfs2_page_add_databufs(ip, page, from, to);
+
+ block_commit_write(page, from, to);
+ return 0;
+ }
+
+ offset = 0;
+ bh = page_buffers(page);
+ while (offset < to) {
+ if (offset >= from) {
+ set_buffer_uptodate(bh);
+ mark_buffer_dirty(bh);
+ clear_buffer_new(bh);
+ write_dirty_buffer(bh, WRITE);
+ }
+ offset += blksize;
+ bh = bh->b_this_page;
+ }
- block_commit_write(page, from, to);
+ offset = 0;
+ bh = page_buffers(page);
+ while (offset < to) {
+ if (offset >= from) {
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh))
+ return -EIO;
+ }
+ offset += blksize;
+ bh = bh->b_this_page;
+ }
+ return 0;
}
static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
return !buffer_mapped(&bh_map);
}
-static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
+static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
+ int mode)
{
struct inode *inode = page->mapping->host;
unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
gfs2_block_map);
if (unlikely(ret))
return ret;
- empty_write_end(page, start, end);
+ ret = empty_write_end(page, start, end, mode);
+ if (unlikely(ret))
+ return ret;
end = 0;
}
start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
ret = __block_write_begin(page, start, end - start, gfs2_block_map);
if (unlikely(ret))
return ret;
- empty_write_end(page, start, end);
+ ret = empty_write_end(page, start, end, mode);
+ if (unlikely(ret))
+ return ret;
}
return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
if (curr == end)
to = end_offset;
- error = write_empty_blocks(page, from, to);
+ error = write_empty_blocks(page, from, to, mode);
if (!error && offset + to > inode->i_size &&
!(mode & FALLOC_FL_KEEP_SIZE)) {
i_size_write(inode, offset + to);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f07643e21bfa..7a4fb630a320 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -93,14 +93,12 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
static inline void spin_lock_bucket(unsigned int hash)
{
- struct hlist_bl_head *bl = &gl_hash_table[hash];
- bit_spin_lock(0, (unsigned long *)bl);
+ hlist_bl_lock(&gl_hash_table[hash]);
}
static inline void spin_unlock_bucket(unsigned int hash)
{
- struct hlist_bl_head *bl = &gl_hash_table[hash];
- __bit_spin_unlock(0, (unsigned long *)bl);
+ hlist_bl_unlock(&gl_hash_table[hash]);
}
static void gfs2_glock_dealloc(struct rcu_head *rcu)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 3754e3cbf02b..25eeb2bcee47 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -385,6 +385,10 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
static void iopen_go_callback(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+
+ if (sdp->sd_vfs->s_flags & MS_RDONLY)
+ return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 97d54a28776a..9134dcb89479 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,61 @@ struct gfs2_inum_range_host {
u64 ir_length;
};
+struct gfs2_skip_data {
+ u64 no_addr;
+ int skipped;
+ int non_block;
+};
+
static int iget_test(struct inode *inode, void *opaque)
{
struct gfs2_inode *ip = GFS2_I(inode);
- u64 *no_addr = opaque;
+ struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == *no_addr)
+ if (ip->i_no_addr == data->no_addr) {
+ if (data->non_block &&
+ inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
+ data->skipped = 1;
+ return 0;
+ }
return 1;
-
+ }
return 0;
}
static int iget_set(struct inode *inode, void *opaque)
{
struct gfs2_inode *ip = GFS2_I(inode);
- u64 *no_addr = opaque;
+ struct gfs2_skip_data *data = opaque;
- inode->i_ino = (unsigned long)*no_addr;
- ip->i_no_addr = *no_addr;
+ if (data->skipped)
+ return -ENOENT;
+ inode->i_ino = (unsigned long)(data->no_addr);
+ ip->i_no_addr = data->no_addr;
return 0;
}
struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
{
unsigned long hash = (unsigned long)no_addr;
- return ilookup5(sb, hash, iget_test, &no_addr);
+ struct gfs2_skip_data data;
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ data.non_block = 0;
+ return ilookup5(sb, hash, iget_test, &data);
}
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
+ int non_block)
{
+ struct gfs2_skip_data data;
unsigned long hash = (unsigned long)no_addr;
- return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
+
+ data.no_addr = no_addr;
+ data.skipped = 0;
+ data.non_block = non_block;
+ return iget5_locked(sb, hash, iget_test, iget_set, &data);
}
/**
@@ -111,19 +135,20 @@ static void gfs2_set_iop(struct inode *inode)
* @sb: The super block
* @no_addr: The inode number
* @type: The type of the inode
+ * @non_block: Can we block on inodes that are being freed?
*
* Returns: A VFS inode, or an error
*/
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
- u64 no_addr, u64 no_formal_ino)
+ u64 no_addr, u64 no_formal_ino, int non_block)
{
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *io_gl = NULL;
int error;
- inode = gfs2_iget(sb, no_addr);
+ inode = gfs2_iget(sb, no_addr, non_block);
ip = GFS2_I(inode);
if (!inode)
@@ -185,11 +210,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
{
struct super_block *sb = sdp->sd_vfs;
struct gfs2_holder i_gh;
- struct inode *inode;
+ struct inode *inode = NULL;
int error;
+ /* Must not read in block until block type is verified */
error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
- LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+ LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
if (error)
return ERR_PTR(error);
@@ -197,7 +223,7 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
if (error)
goto fail;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
if (IS_ERR(inode))
goto fail;
@@ -843,7 +869,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
goto fail_gunlock2;
inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
- inum.no_formal_ino);
+ inum.no_formal_ino, 0);
if (IS_ERR(inode))
goto fail_gunlock2;
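The gfs2 lookup changes above pass a gfs2_skip_data cookie to iget5_locked() so the test callback can refuse to reuse an inode that is being freed, and the set callback can fail with -ENOENT instead of resurrecting it. A condensed sketch of that test-callback pattern, reusing only identifiers visible in the hunks (example_* names are illustrative, not the kernel symbols):

struct example_skip_data {
        u64 no_addr;                    /* search key */
        int skipped;                    /* set when a match must not be reused */
        int non_block;                  /* caller cannot wait for a freeing inode */
};

static int example_iget_test(struct inode *inode, void *opaque)
{
        struct example_skip_data *data = opaque;

        if (GFS2_I(inode)->i_no_addr != data->no_addr)
                return 0;
        if (data->non_block &&
            (inode->i_state & (I_FREEING | I_CLEAR | I_WILL_FREE))) {
                data->skipped = 1;      /* tell the set callback to bail out with -ENOENT */
                return 0;
        }
        return 1;
}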
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 3e00a66e7cbd..099ca305e518 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -97,7 +97,8 @@ err:
}
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino);
+ u64 no_addr, u64 no_formal_ino,
+ int non_block);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
u64 *no_formal_ino,
unsigned int blktype);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 42ef24355afb..d3c69eb91c74 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -430,7 +430,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
struct dentry *dentry;
struct inode *inode;
- inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index cf930cd9664a..6fcae8469f6d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -945,7 +945,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
/* rgblk_search can return a block < goal, so we need to
keep it marching forward. */
no_addr = block + rgd->rd_data0;
- goal++;
+ goal = max(block + 1, goal + 1);
if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
continue;
if (no_addr == skip)
@@ -971,7 +971,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
found++;
/* Limit reclaim to sensible number of tasks */
- if (found > 2*NR_CPUS)
+ if (found > NR_CPUS)
return;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a4e23d68a398..b9f28e66dad1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1318,15 +1318,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
static void gfs2_evict_inode(struct inode *inode)
{
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+ struct super_block *sb = inode->i_sb;
+ struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
- if (inode->i_nlink)
+ if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
goto out;
- error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
+ /* Must not read inode block until block type has been verified */
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
goto out;
@@ -1336,6 +1338,12 @@ static void gfs2_evict_inode(struct inode *inode)
if (error)
goto out_truncate;
+ if (test_bit(GIF_INVALID, &ip->i_flags)) {
+ error = gfs2_inode_refresh(ip);
+ if (error)
+ goto out_truncate;
+ }
+
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 20af62f4304b..6e28000a4b21 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
int ret;
struct timespec now = current_kernel_time();
+ *cbh = NULL;
+
if (is_journal_aborted(journal))
return 0;
@@ -806,7 +808,7 @@ wait_for_iobuf:
if (err)
__jbd2_journal_abort_hard(journal);
}
- if (!err && !is_journal_aborted(journal))
+ if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index aba8ebaec25c..e0ec3db1c395 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2413,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
if (!new_dev)
return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+ bd = bdget(device);
spin_lock(&devname_cache_lock);
if (devcache[i]) {
if (devcache[i]->device == device) {
kfree(new_dev);
+ bdput(bd);
ret = devcache[i]->devname;
spin_unlock(&devname_cache_lock);
return ret;
@@ -2425,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
}
devcache[i] = new_dev;
devcache[i]->device = device;
- bd = bdget(device);
if (bd) {
bdevname(bd, devcache[i]->devname);
bdput(bd);
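The jbd2_dev_to_name() reordering above moves bdget() in front of the devname_cache_lock spinlock because bdget() can sleep, and the reference is then dropped with bdput() on the cache-hit path as well. A condensed sketch of the resulting ordering (not a standalone function body; identifiers are those visible in the hunk):

        bd = bdget(device);                     /* may sleep, so call it before the spinlock */
        spin_lock(&devname_cache_lock);
        /* ... only non-sleeping cache lookup/update under the lock ... */
        spin_unlock(&devname_cache_lock);
        if (bd) {
                bdevname(bd, devcache[i]->devname);
                bdput(bd);                      /* drop the reference on every path */
        }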
diff --git a/fs/namei.c b/fs/namei.c
index e6cd6113872c..54fc993e3027 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
do {
seq = read_seqcount_begin(&fs->seq);
nd->root = fs->root;
+ nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
} while (read_seqcount_retry(&fs->seq, seq));
}
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 7dba2ed03429..d99bcf59e4c2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1030,18 +1030,6 @@ const struct seq_operations mounts_op = {
.show = show_vfsmnt
};
-static int uuid_is_nil(u8 *uuid)
-{
- int i;
- u8 *cp = (u8 *)uuid;
-
- for (i = 0; i < 16; i++) {
- if (*cp++)
- return 0;
- }
- return 1;
-}
-
static int show_mountinfo(struct seq_file *m, void *v)
{
struct proc_mounts *p = m->private;
@@ -1085,10 +1073,6 @@ static int show_mountinfo(struct seq_file *m, void *v)
if (IS_MNT_UNBINDABLE(mnt))
seq_puts(m, " unbindable");
- if (!uuid_is_nil(mnt->mnt_sb->s_uuid))
- /* print the uuid */
- seq_printf(m, " uuid:%pU", mnt->mnt_sb->s_uuid);
-
/* Filesystem specific data */
seq_puts(m, " - ");
show_type(m, sb);
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 9166fcb66da2..89fc160fd5b0 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -148,67 +148,64 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors,
return pseudoflavor;
}
-static rpc_authflavor_t nfs_negotiate_security(const struct dentry *parent, const struct dentry *dentry)
+static int nfs_negotiate_security(const struct dentry *parent,
+ const struct dentry *dentry,
+ rpc_authflavor_t *flavor)
{
- int status = 0;
struct page *page;
struct nfs4_secinfo_flavors *flavors;
int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
- rpc_authflavor_t flavor = RPC_AUTH_UNIX;
+ int ret = -EPERM;
secinfo = NFS_PROTO(parent->d_inode)->secinfo;
if (secinfo != NULL) {
page = alloc_page(GFP_KERNEL);
if (!page) {
- status = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
flavors = page_address(page);
- status = secinfo(parent->d_inode, &dentry->d_name, flavors);
- flavor = nfs_find_best_sec(flavors, dentry->d_inode);
+ ret = secinfo(parent->d_inode, &dentry->d_name, flavors);
+ *flavor = nfs_find_best_sec(flavors, dentry->d_inode);
put_page(page);
}
- return flavor;
-
out:
- status = -ENOMEM;
- return status;
+ return ret;
}
-static rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
- struct dentry *dentry, struct path *path,
- struct nfs_fh *fh, struct nfs_fattr *fattr)
+static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent,
+ struct dentry *dentry, struct path *path,
+ struct nfs_fh *fh, struct nfs_fattr *fattr,
+ rpc_authflavor_t *flavor)
{
- rpc_authflavor_t flavor;
struct rpc_clnt *clone;
struct rpc_auth *auth;
int err;
- flavor = nfs_negotiate_security(parent, path->dentry);
- if (flavor < 0)
+ err = nfs_negotiate_security(parent, path->dentry, flavor);
+ if (err < 0)
goto out;
clone = rpc_clone_client(server->client);
- auth = rpcauth_create(flavor, clone);
+ auth = rpcauth_create(*flavor, clone);
if (!auth) {
- flavor = -EIO;
+ err = -EIO;
goto out_shutdown;
}
err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode,
&path->dentry->d_name,
fh, fattr);
- if (err < 0)
- flavor = err;
out_shutdown:
rpc_shutdown_client(clone);
out:
- return flavor;
+ return err;
}
#else /* CONFIG_NFS_V4 */
-static inline rpc_authflavor_t nfs_lookup_with_sec(struct nfs_server *server,
- struct dentry *parent, struct dentry *dentry,
- struct path *path, struct nfs_fh *fh,
- struct nfs_fattr *fattr)
+static inline int nfs_lookup_with_sec(struct nfs_server *server,
+ struct dentry *parent, struct dentry *dentry,
+ struct path *path, struct nfs_fh *fh,
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t *flavor)
{
return -EPERM;
}
@@ -234,7 +231,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
struct nfs_fh *fh = NULL;
struct nfs_fattr *fattr = NULL;
int err;
- rpc_authflavor_t flavor = 1;
+ rpc_authflavor_t flavor = RPC_AUTH_UNIX;
dprintk("--> nfs_d_automount()\n");
@@ -255,13 +252,8 @@ struct vfsmount *nfs_d_automount(struct path *path)
err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode,
&path->dentry->d_name,
fh, fattr);
- if (err == -EPERM) {
- flavor = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr);
- if (flavor < 0)
- err = flavor;
- else
- err = 0;
- }
+ if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL)
+ err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor);
dput(parent);
if (err != 0) {
mnt = ERR_PTR(err);
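
The refactor above exists because rpc_authflavor_t is an unsigned type, so stuffing a negative errno into it and testing "flavor < 0" can never work; the replacement is the common kernel shape of returning an int status and passing the real result back through an out pointer. A minimal sketch of that shape, with hypothetical names (negotiate_value(), struct ctx) that are not part of the NFS code:

#include <linux/errno.h>
#include <linux/types.h>

struct ctx;				/* hypothetical caller context */

/* Returns 0 or a negative errno; *result is only meaningful on success. */
static int negotiate_value(struct ctx *c, u32 *result)
{
	if (!c)
		return -EINVAL;

	*result = 42;			/* placeholder for the negotiated value */
	return 0;
}

static int use_value(struct ctx *c)
{
	u32 val;
	int err = negotiate_value(c, &val);

	if (err < 0)
		return err;		/* the errno never gets folded into the value */
	/* ... use val ... */
	return 0;
}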
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index dfd1e6d7e6c3..9bf41eab3e46 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2204,8 +2204,6 @@ static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandl
goto out;
}
ret = nfs4_lookup_root(server, fhandle, info);
- if (ret < 0)
- ret = -EAGAIN;
out:
return ret;
}
@@ -2226,7 +2224,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
for (i = 0; i < len; i++) {
status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
- if (status == 0)
+ if (status != -EPERM)
break;
}
if (status == 0)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af0c6279a4a7..e4cbc11a74ab 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -542,11 +542,15 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, u
if (!nfs_need_commit(nfsi))
return 0;
+ spin_lock(&inode->i_lock);
ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
if (ret > 0)
nfsi->ncommit -= ret;
+ spin_unlock(&inode->i_lock);
+
if (nfs_need_commit(NFS_I(inode)))
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
return ret;
}
#else
@@ -1483,9 +1487,7 @@ int nfs_commit_inode(struct inode *inode, int how)
res = nfs_commit_set_lock(NFS_I(inode), may_wait);
if (res <= 0)
goto out_mark_dirty;
- spin_lock(&inode->i_lock);
res = nfs_scan_commit(inode, &head, 0, 0);
- spin_unlock(&inode->i_lock);
if (res) {
int error;
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 0c6d81670137..7c831a2731fa 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
exp_readlock();
nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
fh_put(&fh);
- rqstp->rq_client = NULL;
exp_readunlock();
/* We return nlm error codes as nlm doesn't know
* about nfsd, but nfsd does know about nlm..
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 4b36ec3eb8ea..4cf04e11c66c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
if (atomic_dec_and_test(&fp->fi_delegees)) {
vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
fp->fi_lease = NULL;
+ fput(fp->fi_deleg_file);
fp->fi_deleg_file = NULL;
}
}
@@ -397,9 +398,12 @@ static void unhash_generic_stateid(struct nfs4_stateid *stp)
static void free_generic_stateid(struct nfs4_stateid *stp)
{
- int oflag = nfs4_access_bmap_to_omode(stp);
+ int oflag;
- nfs4_file_put_access(stp->st_file, oflag);
+ if (stp->st_access_bmap) {
+ oflag = nfs4_access_bmap_to_omode(stp);
+ nfs4_file_put_access(stp->st_file, oflag);
+ }
put_nfs4_file(stp->st_file);
kmem_cache_free(stateid_slab, stp);
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2e1cebde90df..129f3c9f62d5 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
if (err)
goto out;
@@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dchild))
goto out_nfserr;
+ /* If file doesn't exist, check for permissions to create one */
+ if (!dchild->d_inode) {
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
+ if (err)
+ goto out;
+ }
+
err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
if (err)
goto out;
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index b10e3540d5b7..ce4f62440425 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -1299,6 +1299,11 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
BUG_ON (!data || !frags);
+ if (size < 2 * VBLK_SIZE_HEAD) {
+ ldm_error("Value of size is too small.");
+ return false;
+ }
+
group = get_unaligned_be32(data + 0x08);
rec = get_unaligned_be16(data + 0x0C);
num = get_unaligned_be16(data + 0x0E);
@@ -1306,6 +1311,10 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
ldm_error ("A VBLK claims to have %d parts.", num);
return false;
}
+ if (rec >= num) {
+ ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
+ return false;
+ }
list_for_each (item, frags) {
f = list_entry (item, struct frag, list);
@@ -1334,10 +1343,9 @@ found:
f->map |= (1 << rec);
- if (num > 0) {
- data += VBLK_SIZE_HEAD;
- size -= VBLK_SIZE_HEAD;
- }
+ data += VBLK_SIZE_HEAD;
+ size -= VBLK_SIZE_HEAD;
+
memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
return true;
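
The two new checks above are defensive-parsing boilerplate: verify the buffer is at least as big as the fixed header about to be read, and that an index field stays inside its advertised range, before touching the payload. A small sketch of the same idea, with a made-up header size and field offsets rather than the real VBLK layout:

#include <linux/types.h>
#include <asm/unaligned.h>

#define HDR_SIZE 0x10			/* hypothetical fixed header size */

static bool parse_fragment(const u8 *data, int size)
{
	u16 rec, num;

	if (size < 2 * HDR_SIZE)	/* need the header plus some payload */
		return false;

	num = get_unaligned_be16(data + 0x0E);
	rec = get_unaligned_be16(data + 0x0C);
	if (num == 0 || rec >= num)	/* index must fall inside the claimed range */
		return false;

	/* from here on data[HDR_SIZE..size) can be consumed safely */
	return true;
}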
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd6628d3ba42..dfa532730e55 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+ unsigned int nr;
+ struct task_struct *reaper;
struct tgid_iter iter;
struct pid_namespace *ns;
+ if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
+ goto out_no_task;
+ nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+ reaper = get_proc_task(filp->f_path.dentry->d_inode);
if (!reaper)
goto out_no_task;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index a925bf205497..d3c032f5fa0a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -442,7 +442,7 @@ EXPORT_SYMBOL(dquot_acquire);
*/
int dquot_commit(struct dquot *dquot)
{
- int ret = 0, ret2 = 0;
+ int ret = 0;
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
mutex_lock(&dqopt->dqio_mutex);
@@ -454,15 +454,10 @@ int dquot_commit(struct dquot *dquot)
spin_unlock(&dq_list_lock);
/* Inactive dquot can be only if there was error during read/init
* => we have better not writing it */
- if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
- if (info_dirty(&dqopt->info[dquot->dq_type])) {
- ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
- dquot->dq_sb, dquot->dq_type);
- }
- if (ret >= 0)
- ret = ret2;
- }
+ else
+ ret = -EIO;
out_sem:
mutex_unlock(&dqopt->dqio_mutex);
return ret;
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 9eead2c796b7..fbb0b478a346 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
SetPageDirty(page);
unlock_page(page);
+ put_page(page);
}
return 0;
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 919f0de29d8f..e6493cac193d 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -23,6 +23,12 @@
#ifndef __UBIFS_DEBUG_H__
#define __UBIFS_DEBUG_H__
+/* Checking helper functions */
+typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
+ struct ubifs_zbranch *zbr, void *priv);
+typedef int (*dbg_znode_callback)(struct ubifs_info *c,
+ struct ubifs_znode *znode, void *priv);
+
#ifdef CONFIG_UBIFS_FS_DEBUG
/**
@@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c);
void dbg_dump_index(struct ubifs_info *c);
void dbg_dump_lpt_lebs(const struct ubifs_info *c);
-/* Checking helper functions */
-typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
- struct ubifs_zbranch *zbr, void *priv);
-typedef int (*dbg_znode_callback)(struct ubifs_info *c,
- struct ubifs_znode *znode, void *priv);
int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
dbg_znode_callback znode_cb, void *priv);
@@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
int dbg_check_filesystem(struct ubifs_info *c);
void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
int add_pos);
-int dbg_check_lprops(struct ubifs_info *c);
int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
int row, int col);
int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
@@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define DBGKEY(key) ((char *)(key))
#define DBGKEY1(key) ((char *)(key))
-#define ubifs_debugging_init(c) 0
-#define ubifs_debugging_exit(c) ({})
-
-#define dbg_ntype(type) ""
-#define dbg_cstate(cmt_state) ""
-#define dbg_jhead(jhead) ""
-#define dbg_get_key_dump(c, key) ({})
-#define dbg_dump_inode(c, inode) ({})
-#define dbg_dump_node(c, node) ({})
-#define dbg_dump_lpt_node(c, node, lnum, offs) ({})
-#define dbg_dump_budget_req(req) ({})
-#define dbg_dump_lstats(lst) ({})
-#define dbg_dump_budg(c) ({})
-#define dbg_dump_lprop(c, lp) ({})
-#define dbg_dump_lprops(c) ({})
-#define dbg_dump_lpt_info(c) ({})
-#define dbg_dump_leb(c, lnum) ({})
-#define dbg_dump_znode(c, znode) ({})
-#define dbg_dump_heap(c, heap, cat) ({})
-#define dbg_dump_pnode(c, pnode, parent, iip) ({})
-#define dbg_dump_tnc(c) ({})
-#define dbg_dump_index(c) ({})
-#define dbg_dump_lpt_lebs(c) ({})
-
-#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
-#define dbg_old_index_check_init(c, zroot) 0
-#define dbg_save_space_info(c) ({})
-#define dbg_check_space_info(c) 0
-#define dbg_check_old_index(c, zroot) 0
-#define dbg_check_cats(c) 0
-#define dbg_check_ltab(c) 0
-#define dbg_chk_lpt_free_spc(c) 0
-#define dbg_chk_lpt_sz(c, action, len) 0
-#define dbg_check_synced_i_size(inode) 0
-#define dbg_check_dir_size(c, dir) 0
-#define dbg_check_tnc(c, x) 0
-#define dbg_check_idx_size(c, idx_size) 0
-#define dbg_check_filesystem(c) 0
-#define dbg_check_heap(c, heap, cat, add_pos) ({})
-#define dbg_check_lprops(c) 0
-#define dbg_check_lpt_nodes(c, cnode, row, col) 0
-#define dbg_check_inode_size(c, inode, size) 0
-#define dbg_check_data_nodes_order(c, head) 0
-#define dbg_check_nondata_nodes_order(c, head) 0
-#define dbg_force_in_the_gaps_enabled 0
-#define dbg_force_in_the_gaps() 0
-#define dbg_failure_mode 0
-
-#define dbg_debugfs_init() 0
-#define dbg_debugfs_exit()
-#define dbg_debugfs_init_fs(c) 0
-#define dbg_debugfs_exit_fs(c) 0
+static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; }
+static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; }
+static inline const char *dbg_ntype(int type) { return ""; }
+static inline const char *dbg_cstate(int cmt_state) { return ""; }
+static inline const char *dbg_jhead(int jhead) { return ""; }
+static inline const char *
+dbg_get_key_dump(const struct ubifs_info *c,
+ const union ubifs_key *key) { return ""; }
+static inline void dbg_dump_inode(const struct ubifs_info *c,
+ const struct inode *inode) { return; }
+static inline void dbg_dump_node(const struct ubifs_info *c,
+ const void *node) { return; }
+static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
+ void *node, int lnum,
+ int offs) { return; }
+static inline void
+dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; }
+static inline void
+dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; }
+static inline void dbg_dump_budg(struct ubifs_info *c) { return; }
+static inline void dbg_dump_lprop(const struct ubifs_info *c,
+ const struct ubifs_lprops *lp) { return; }
+static inline void dbg_dump_lprops(struct ubifs_info *c) { return; }
+static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; }
+static inline void dbg_dump_leb(const struct ubifs_info *c,
+ int lnum) { return; }
+static inline void
+dbg_dump_znode(const struct ubifs_info *c,
+ const struct ubifs_znode *znode) { return; }
+static inline void dbg_dump_heap(struct ubifs_info *c,
+ struct ubifs_lpt_heap *heap,
+ int cat) { return; }
+static inline void dbg_dump_pnode(struct ubifs_info *c,
+ struct ubifs_pnode *pnode,
+ struct ubifs_nnode *parent,
+ int iip) { return; }
+static inline void dbg_dump_tnc(struct ubifs_info *c) { return; }
+static inline void dbg_dump_index(struct ubifs_info *c) { return; }
+static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; }
+
+static inline int dbg_walk_index(struct ubifs_info *c,
+ dbg_leaf_callback leaf_cb,
+ dbg_znode_callback znode_cb,
+ void *priv) { return 0; }
+static inline void dbg_save_space_info(struct ubifs_info *c) { return; }
+static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; }
+static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; }
+static inline int
+dbg_old_index_check_init(struct ubifs_info *c,
+ struct ubifs_zbranch *zroot) { return 0; }
+static inline int
+dbg_check_old_index(struct ubifs_info *c,
+ struct ubifs_zbranch *zroot) { return 0; }
+static inline int dbg_check_cats(struct ubifs_info *c) { return 0; }
+static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; }
+static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; }
+static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
+ int action, int len) { return 0; }
+static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; }
+static inline int dbg_check_dir_size(struct ubifs_info *c,
+ const struct inode *dir) { return 0; }
+static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; }
+static inline int dbg_check_idx_size(struct ubifs_info *c,
+ long long idx_size) { return 0; }
+static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; }
+static inline void dbg_check_heap(struct ubifs_info *c,
+ struct ubifs_lpt_heap *heap,
+ int cat, int add_pos) { return; }
+static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
+ struct ubifs_cnode *cnode, int row, int col) { return 0; }
+static inline int dbg_check_inode_size(struct ubifs_info *c,
+ const struct inode *inode,
+ loff_t size) { return 0; }
+static inline int
+dbg_check_data_nodes_order(struct ubifs_info *c,
+ struct list_head *head) { return 0; }
+static inline int
+dbg_check_nondata_nodes_order(struct ubifs_info *c,
+ struct list_head *head) { return 0; }
+
+static inline int dbg_force_in_the_gaps(void) { return 0; }
+#define dbg_force_in_the_gaps_enabled 0
+#define dbg_failure_mode 0
+
+static inline int dbg_debugfs_init(void) { return 0; }
+static inline void dbg_debugfs_exit(void) { return; }
+static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; }
+static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; }
#endif /* !CONFIG_UBIFS_FS_DEBUG */
#endif /* !__UBIFS_DEBUG_H__ */
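
The large hunk above swaps the !CONFIG_UBIFS_FS_DEBUG macro stubs for empty static inline functions. The point of the conversion is that an inline still type-checks and "uses" its arguments (so non-debug builds catch bad calls and avoid set-but-unused warnings) while compiling away to nothing. A minimal sketch of the two styles for a hypothetical dbg_dump_foo() helper:

struct foo;				/* hypothetical type */

#ifdef CONFIG_FOO_DEBUG
void dbg_dump_foo(const struct foo *f);	/* real implementation elsewhere */
#else
/* Old style: a macro stub neither type-checks nor evaluates its argument. */
/* #define dbg_dump_foo(f) ({}) */

/* New style: an empty inline keeps type checking and still generates no code. */
static inline void dbg_dump_foo(const struct foo *f) { }
#endif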
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 28be1e6a65e8..b286db79c686 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync)
dbg_gen("syncing inode %lu", inode->i_ino);
+ if (inode->i_sb->s_flags & MS_RDONLY)
+ return 0;
+
/*
* VFS has already synchronized dirty pages for this inode. Synchronize
* the inode unless this is a 'datasync()' call.
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 936f2cbfe6b6..3dbad6fbd1eb 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -317,6 +317,32 @@ int ubifs_recover_master_node(struct ubifs_info *c)
goto out_free;
}
memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
+
+ /*
+ * We had to recover the master node, which means there was an
+ * unclean reboot. However, it is possible that the master node
+ * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
+ * E.g., consider the following chain of events:
+ *
+ * 1. UBIFS was cleanly unmounted, so the master node is clean
+ * 2. UBIFS is being mounted R/W and starts changing the master
+ * node in the first LEB (%UBIFS_MST_LNUM). A power cut happens,
+ * so this LEB ends up with some amount of garbage at the
+ * end.
+ * 3. UBIFS is being mounted R/O. We reach this place and
+ * recover the master node from the second LEB
+ * (%UBIFS_MST_LNUM + 1). But we cannot update the media
+ * because we are being mounted R/O. We have to defer the
+ * operation.
+ * 4. However, this master node (@c->mst_node) is marked as
+ * clean (since step 1). And if we just return, the
+ * mount code will be confused and won't recover the master
+ * node when it is re-mounted R/W later.
+ *
+ * Thus, we force the recovery by marking the master node as
+ * dirty.
+ */
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
} else {
/* Write the recovered master node */
c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index c75f6133206c..be6c7b008f38 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1671,14 +1671,25 @@ static int ubifs_remount_rw(struct ubifs_info *c)
if (err)
goto out;
+ dbg_gen("re-mounted read-write");
+ c->remounting_rw = 0;
+
if (c->need_recovery) {
c->need_recovery = 0;
ubifs_msg("deferred recovery completed");
+ } else {
+ /*
+ * Do not run the debugging space check if we were doing
+ * recovery, because when we saved the information we had the
+ * file-system in a state where the TNC and lprops have been
+ * modified in memory, but all the I/O operations (including a
+ * commit) were deferred. So the file-system was in
+ * "non-committed" state. Now the file-system is in committed
+ * state, and of course the amount of free space will change
+ * because, for example, the old index size was imprecise.
+ */
+ err = dbg_check_space_info(c);
}
-
- dbg_gen("re-mounted read-write");
- c->remounting_rw = 0;
- err = dbg_check_space_info(c);
mutex_unlock(&c->umount_mutex);
return err;
@@ -1761,10 +1772,12 @@ static void ubifs_put_super(struct super_block *sb)
* of the media. For example, there will be dirty inodes if we failed
* to write them back because of I/O errors.
*/
- ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0);
- ubifs_assert(c->budg_idx_growth == 0);
- ubifs_assert(c->budg_dd_growth == 0);
- ubifs_assert(c->budg_data_growth == 0);
+ if (!c->ro_error) {
+ ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0);
+ ubifs_assert(c->budg_idx_growth == 0);
+ ubifs_assert(c->budg_dd_growth == 0);
+ ubifs_assert(c->budg_data_growth == 0);
+ }
/*
* The 'c->umount_lock' prevents races between UBIFS memory shrinker
diff --git a/fs/xattr.c b/fs/xattr.c
index a19acdb81cd1..f1ef94974dea 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -666,7 +666,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (!handler)
return -EOPNOTSUPP;
- return handler->set(dentry, name, value, size, 0, handler->flags);
+ return handler->set(dentry, name, value, size, flags, handler->flags);
}
/*
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5ea402023ebd..9ef9ed2cfe2e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -293,7 +293,6 @@ xfs_buf_allocate_memory(
size_t nbytes, offset;
gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i;
- pgoff_t first;
xfs_off_t end;
int error;
@@ -333,7 +332,6 @@ use_alloc_page:
return error;
offset = bp->b_offset;
- first = bp->b_file_offset >> PAGE_SHIFT;
bp->b_flags |= _XBF_PAGES;
for (i = 0; i < bp->b_page_count; i++) {
@@ -657,8 +655,6 @@ xfs_buf_readahead(
xfs_off_t ioff,
size_t isize)
{
- struct backing_dev_info *bdi;
-
if (bdi_read_congested(target->bt_bdi))
return;
@@ -919,8 +915,6 @@ xfs_buf_lock(
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
- if (atomic_read(&bp->b_io_remaining))
- blk_flush_plug(current);
down(&bp->b_sema);
XB_SET_OWNER(bp);
@@ -1309,8 +1303,6 @@ xfs_buf_iowait(
{
trace_xfs_buf_iowait(bp, _RET_IP_);
- if (atomic_read(&bp->b_io_remaining))
- blk_flush_plug(current);
wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_);
@@ -1747,8 +1739,8 @@ xfsbufd(
do {
long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
- int count = 0;
struct list_head tmp;
+ struct blk_plug plug;
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
@@ -1764,16 +1756,15 @@ xfsbufd(
xfs_buf_delwri_split(target, &tmp, age);
list_sort(NULL, &tmp, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
while (!list_empty(&tmp)) {
struct xfs_buf *bp;
bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_bdstrat_cb(bp);
- count++;
}
- if (count)
- blk_flush_plug(current);
-
+ blk_finish_plug(&plug);
} while (!kthread_should_stop());
return 0;
@@ -1793,6 +1784,7 @@ xfs_flush_buftarg(
int pincount = 0;
LIST_HEAD(tmp_list);
LIST_HEAD(wait_list);
+ struct blk_plug plug;
xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
@@ -1807,6 +1799,8 @@ xfs_flush_buftarg(
* we do that after issuing all the IO.
*/
list_sort(NULL, &tmp_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
while (!list_empty(&tmp_list)) {
bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
ASSERT(target == bp->b_target);
@@ -1817,10 +1811,10 @@ xfs_flush_buftarg(
}
xfs_bdstrat_cb(bp);
}
+ blk_finish_plug(&plug);
if (wait) {
- /* Expedite and wait for IO to complete. */
- blk_flush_plug(current);
+ /* Wait for IO to complete. */
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
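
The xfs_buf.c changes above move from the old "count submissions, then blk_flush_plug()" approach to the on-stack plugging API introduced in 2.6.39: block IO queued between blk_start_plug() and blk_finish_plug() is batched per task and dispatched when the plug is finished (or if the task sleeps). A minimal sketch of the pattern with a hypothetical submit_one() helper standing in for xfs_bdstrat_cb():

#include <linux/blkdev.h>
#include <linux/list.h>

struct item {
	struct list_head list;
};

static void submit_one(struct item *it)
{
	/* placeholder: queue the block IO for @it here */
}

static void submit_all(struct list_head *batch)
{
	struct blk_plug plug;

	blk_start_plug(&plug);		/* start batching this task's block IO */
	while (!list_empty(batch)) {
		struct item *it = list_first_entry(batch, struct item, list);

		list_del_init(&it->list);
		submit_one(it);		/* requests accumulate behind the plug */
	}
	blk_finish_plug(&plug);		/* unplug: everything queued above is dispatched */
}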
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
index 508e06fd7d1e..9f76cceb678d 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -28,53 +28,49 @@
/*
* XFS logging functions
*/
-static int
+static void
__xfs_printk(
const char *level,
const struct xfs_mount *mp,
struct va_format *vaf)
{
- if (mp && mp->m_fsname)
- return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
- return printk("%sXFS: %pV\n", level, vaf);
+ if (mp && mp->m_fsname) {
+ printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
+ return;
+ }
+ printk("%sXFS: %pV\n", level, vaf);
}
-int xfs_printk(
+void xfs_printk(
const char *level,
const struct xfs_mount *mp,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- int r;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- r = __xfs_printk(level, mp, &vaf);
+ __xfs_printk(level, mp, &vaf);
va_end(args);
-
- return r;
}
#define define_xfs_printk_level(func, kern_level) \
-int func(const struct xfs_mount *mp, const char *fmt, ...) \
+void func(const struct xfs_mount *mp, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
- int r; \
\
va_start(args, fmt); \
\
vaf.fmt = fmt; \
vaf.va = &args; \
\
- r = __xfs_printk(kern_level, mp, &vaf); \
+ __xfs_printk(kern_level, mp, &vaf); \
va_end(args); \
- \
- return r; \
} \
define_xfs_printk_level(xfs_emerg, KERN_EMERG);
@@ -88,7 +84,7 @@ define_xfs_printk_level(xfs_info, KERN_INFO);
define_xfs_printk_level(xfs_debug, KERN_DEBUG);
#endif
-int
+void
xfs_alert_tag(
const struct xfs_mount *mp,
int panic_tag,
@@ -97,7 +93,6 @@ xfs_alert_tag(
struct va_format vaf;
va_list args;
int do_panic = 0;
- int r;
if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
xfs_printk(KERN_ALERT, mp,
@@ -110,12 +105,10 @@ xfs_alert_tag(
vaf.fmt = fmt;
vaf.va = &args;
- r = __xfs_printk(KERN_ALERT, mp, &vaf);
+ __xfs_printk(KERN_ALERT, mp, &vaf);
va_end(args);
BUG_ON(do_panic);
-
- return r;
}
void
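
All of the wrappers above funnel through one helper by packing the caller's format string and va_list into a struct va_format and printing it with the %pV specifier, which is what lets __xfs_printk() prepend the level and filesystem name without formatting the message twice. A minimal sketch of the %pV pattern with a hypothetical my_log() wrapper:

#include <linux/kernel.h>
#include <linux/printk.h>

static void my_log(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* %pV expands the wrapped format and arguments exactly once, here */
	printk(KERN_INFO "%s: %pV\n", prefix, &vaf);
	va_end(args);
}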
diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h
index e77ffa16745b..f1b3fc1b6c4e 100644
--- a/fs/xfs/linux-2.6/xfs_message.h
+++ b/fs/xfs/linux-2.6/xfs_message.h
@@ -3,32 +3,34 @@
struct xfs_mount;
-extern int xfs_printk(const char *level, const struct xfs_mount *mp,
+extern void xfs_printk(const char *level, const struct xfs_mount *mp,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
-extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_alert_tag(const struct xfs_mount *mp, int tag,
+extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
-extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
#ifdef DEBUG
-extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
#else
-#define xfs_debug(mp, fmt, ...) (0)
+static inline void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
+{
+}
#endif
extern void assfail(char *expr, char *f, int l);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1ba5c451da36..b38e58d02299 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -816,75 +816,6 @@ xfs_setup_devices(
return 0;
}
-/*
- * XFS AIL push thread support
- */
-void
-xfsaild_wakeup(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
-{
- /* only ever move the target forwards */
- if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
- ailp->xa_target = threshold_lsn;
- wake_up_process(ailp->xa_task);
- }
-}
-
-STATIC int
-xfsaild(
- void *data)
-{
- struct xfs_ail *ailp = data;
- xfs_lsn_t last_pushed_lsn = 0;
- long tout = 0; /* milliseconds */
-
- while (!kthread_should_stop()) {
- /*
- * for short sleeps indicating congestion, don't allow us to
- * get woken early. Otherwise all we do is bang on the AIL lock
- * without making progress.
- */
- if (tout && tout <= 20)
- __set_current_state(TASK_KILLABLE);
- else
- __set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(tout ?
- msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
-
- /* swsusp */
- try_to_freeze();
-
- ASSERT(ailp->xa_mount->m_log);
- if (XFS_FORCED_SHUTDOWN(ailp->xa_mount))
- continue;
-
- tout = xfsaild_push(ailp, &last_pushed_lsn);
- }
-
- return 0;
-} /* xfsaild */
-
-int
-xfsaild_start(
- struct xfs_ail *ailp)
-{
- ailp->xa_target = 0;
- ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
- ailp->xa_mount->m_fsname);
- if (IS_ERR(ailp->xa_task))
- return -PTR_ERR(ailp->xa_task);
- return 0;
-}
-
-void
-xfsaild_stop(
- struct xfs_ail *ailp)
-{
- kthread_stop(ailp->xa_task);
-}
-
-
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
@@ -1191,22 +1122,12 @@ xfs_fs_sync_fs(
return -error;
if (laptop_mode) {
- int prev_sync_seq = mp->m_sync_seq;
-
/*
* The disk must be active because we're syncing.
* We schedule xfssyncd now (now that the disk is
* active) instead of later (when it might not be).
*/
- wake_up_process(mp->m_sync_task);
- /*
- * We have to wait for the sync iteration to complete.
- * If we don't, the disk activity caused by the sync
- * will come after the sync is completed, and that
- * triggers another sync from laptop mode.
- */
- wait_event(mp->m_wait_single_sync_task,
- mp->m_sync_seq != prev_sync_seq);
+ flush_delayed_work_sync(&mp->m_sync_work);
}
return 0;
@@ -1490,9 +1411,6 @@ xfs_fs_fill_super(
spin_lock_init(&mp->m_sb_lock);
mutex_init(&mp->m_growlock);
atomic_set(&mp->m_active_trans, 0);
- INIT_LIST_HEAD(&mp->m_sync_list);
- spin_lock_init(&mp->m_sync_lock);
- init_waitqueue_head(&mp->m_wait_single_sync_task);
mp->m_super = sb;
sb->s_fs_info = mp;
@@ -1799,6 +1717,38 @@ xfs_destroy_zones(void)
}
STATIC int __init
+xfs_init_workqueues(void)
+{
+ /*
+ * max_active is set to 8 to give enough concurrency to allow
+ * multiple work operations on each CPU to run. This allows multiple
+ * filesystems to be running sync work concurrently, and scales with
+ * the number of CPUs in the system.
+ */
+ xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_syncd_wq)
+ goto out;
+
+ xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
+ if (!xfs_ail_wq)
+ goto out_destroy_syncd;
+
+ return 0;
+
+out_destroy_syncd:
+ destroy_workqueue(xfs_syncd_wq);
+out:
+ return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_workqueues(void)
+{
+ destroy_workqueue(xfs_ail_wq);
+ destroy_workqueue(xfs_syncd_wq);
+}
+
+STATIC int __init
init_xfs_fs(void)
{
int error;
@@ -1813,10 +1763,14 @@ init_xfs_fs(void)
if (error)
goto out;
- error = xfs_mru_cache_init();
+ error = xfs_init_workqueues();
if (error)
goto out_destroy_zones;
+ error = xfs_mru_cache_init();
+ if (error)
+ goto out_destroy_wq;
+
error = xfs_filestream_init();
if (error)
goto out_mru_cache_uninit;
@@ -1833,6 +1787,10 @@ init_xfs_fs(void)
if (error)
goto out_cleanup_procfs;
+ error = xfs_init_workqueues();
+ if (error)
+ goto out_sysctl_unregister;
+
vfs_initquota();
error = register_filesystem(&xfs_fs_type);
@@ -1850,6 +1808,8 @@ init_xfs_fs(void)
xfs_filestream_uninit();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
+ out_destroy_wq:
+ xfs_destroy_workqueues();
out_destroy_zones:
xfs_destroy_zones();
out:
@@ -1866,6 +1826,7 @@ exit_xfs_fs(void)
xfs_buf_terminate();
xfs_filestream_uninit();
xfs_mru_cache_uninit();
+ xfs_destroy_workqueues();
xfs_destroy_zones();
}
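
xfs_init_workqueues()/xfs_destroy_workqueues() above follow the usual multi-resource init pattern: allocate each workqueue in turn, unwind whatever was already created on failure, and tear down in reverse order on exit. A stripped-down sketch of the same shape for two hypothetical workqueues:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq_a, *wq_b;	/* hypothetical workqueues */

static int __init init_queues(void)
{
	wq_a = alloc_workqueue("demo_a", WQ_CPU_INTENSIVE, 8);
	if (!wq_a)
		goto out;

	wq_b = alloc_workqueue("demo_b", WQ_CPU_INTENSIVE, 8);
	if (!wq_b)
		goto out_destroy_a;

	return 0;

out_destroy_a:
	destroy_workqueue(wq_a);	/* unwind only what was created */
out:
	return -ENOMEM;
}

static void destroy_queues(void)
{
	destroy_workqueue(wq_b);	/* reverse order of creation */
	destroy_workqueue(wq_a);
}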
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 9cf35a688f53..e4f9c1b0836c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -22,6 +22,7 @@
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
@@ -39,6 +40,8 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
+struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
/*
* The inode lookup is done in batches to keep the amount of lock traffic and
* radix tree lookups to a minimum. The batch size is a trade off between
@@ -431,62 +434,12 @@ xfs_quiesce_attr(
xfs_unmountfs_writesb(mp);
}
-/*
- * Enqueue a work item to be picked up by the vfs xfssyncd thread.
- * Doing this has two advantages:
- * - It saves on stack space, which is tight in certain situations
- * - It can be used (with care) as a mechanism to avoid deadlocks.
- * Flushing while allocating in a full filesystem requires both.
- */
-STATIC void
-xfs_syncd_queue_work(
- struct xfs_mount *mp,
- void *data,
- void (*syncer)(struct xfs_mount *, void *),
- struct completion *completion)
-{
- struct xfs_sync_work *work;
-
- work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
- INIT_LIST_HEAD(&work->w_list);
- work->w_syncer = syncer;
- work->w_data = data;
- work->w_mount = mp;
- work->w_completion = completion;
- spin_lock(&mp->m_sync_lock);
- list_add_tail(&work->w_list, &mp->m_sync_list);
- spin_unlock(&mp->m_sync_lock);
- wake_up_process(mp->m_sync_task);
-}
-
-/*
- * Flush delayed allocate data, attempting to free up reserved space
- * from existing allocations. At this point a new allocation attempt
- * has failed with ENOSPC and we are in the process of scratching our
- * heads, looking about for more room...
- */
-STATIC void
-xfs_flush_inodes_work(
- struct xfs_mount *mp,
- void *arg)
-{
- struct inode *inode = arg;
- xfs_sync_data(mp, SYNC_TRYLOCK);
- xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
- iput(inode);
-}
-
-void
-xfs_flush_inodes(
- xfs_inode_t *ip)
+static void
+xfs_syncd_queue_sync(
+ struct xfs_mount *mp)
{
- struct inode *inode = VFS_I(ip);
- DECLARE_COMPLETION_ONSTACK(completion);
-
- igrab(inode);
- xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
- wait_for_completion(&completion);
- xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
+ queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+ msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
/*
@@ -496,9 +449,10 @@ xfs_flush_inodes(
*/
STATIC void
xfs_sync_worker(
- struct xfs_mount *mp,
- void *unused)
+ struct work_struct *work)
{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_sync_work);
int error;
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
@@ -508,73 +462,106 @@ xfs_sync_worker(
error = xfs_fs_log_dummy(mp);
else
xfs_log_force(mp, 0);
- xfs_reclaim_inodes(mp, 0);
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+ /* start pushing all the metadata that is currently dirty */
+ xfs_ail_push_all(mp->m_ail);
}
- mp->m_sync_seq++;
- wake_up(&mp->m_wait_single_sync_task);
+
+ /* queue us up again */
+ xfs_syncd_queue_sync(mp);
}
-STATIC int
-xfssyncd(
- void *arg)
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+ struct xfs_mount *mp)
{
- struct xfs_mount *mp = arg;
- long timeleft;
- xfs_sync_work_t *work, *n;
- LIST_HEAD (tmp);
-
- set_freezable();
- timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
- for (;;) {
- if (list_empty(&mp->m_sync_list))
- timeleft = schedule_timeout_interruptible(timeleft);
- /* swsusp */
- try_to_freeze();
- if (kthread_should_stop() && list_empty(&mp->m_sync_list))
- break;
- spin_lock(&mp->m_sync_lock);
- /*
- * We can get woken by laptop mode, to do a sync -
- * that's the (only!) case where the list would be
- * empty with time remaining.
- */
- if (!timeleft || list_empty(&mp->m_sync_list)) {
- if (!timeleft)
- timeleft = xfs_syncd_centisecs *
- msecs_to_jiffies(10);
- INIT_LIST_HEAD(&mp->m_sync_work.w_list);
- list_add_tail(&mp->m_sync_work.w_list,
- &mp->m_sync_list);
- }
- list_splice_init(&mp->m_sync_list, &tmp);
- spin_unlock(&mp->m_sync_lock);
+ /*
+ * We can have inodes enter reclaim after we've shut down the syncd
+ * workqueue during unmount, so don't allow reclaim work to be queued
+ * during unmount.
+ */
+ if (!(mp->m_super->s_flags & MS_ACTIVE))
+ return;
- list_for_each_entry_safe(work, n, &tmp, w_list) {
- (*work->w_syncer)(mp, work->w_data);
- list_del(&work->w_list);
- if (work == &mp->m_sync_work)
- continue;
- if (work->w_completion)
- complete(work->w_completion);
- kmem_free(work);
- }
+ rcu_read_lock();
+ if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+ msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
+ rcu_read_unlock();
+}
- return 0;
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_reclaim_work);
+
+ xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+ xfs_syncd_queue_reclaim(mp);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations. At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
+ */
+void
+xfs_flush_inodes(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ queue_work(xfs_syncd_wq, &mp->m_flush_work);
+ flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(work,
+ struct xfs_mount, m_flush_work);
+
+ xfs_sync_data(mp, SYNC_TRYLOCK);
+ xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}
int
xfs_syncd_init(
struct xfs_mount *mp)
{
- mp->m_sync_work.w_syncer = xfs_sync_worker;
- mp->m_sync_work.w_mount = mp;
- mp->m_sync_work.w_completion = NULL;
- mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
- if (IS_ERR(mp->m_sync_task))
- return -PTR_ERR(mp->m_sync_task);
+ INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+ INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+ INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+ xfs_syncd_queue_sync(mp);
+ xfs_syncd_queue_reclaim(mp);
+
return 0;
}
@@ -582,7 +569,9 @@ void
xfs_syncd_stop(
struct xfs_mount *mp)
{
- kthread_stop(mp->m_sync_task);
+ cancel_delayed_work_sync(&mp->m_sync_work);
+ cancel_delayed_work_sync(&mp->m_reclaim_work);
+ cancel_work_sync(&mp->m_flush_work);
}
void
@@ -601,6 +590,10 @@ __xfs_inode_set_reclaim_tag(
XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
XFS_ICI_RECLAIM_TAG);
spin_unlock(&ip->i_mount->m_perag_lock);
+
+ /* schedule periodic background inode reclaim */
+ xfs_syncd_queue_reclaim(ip->i_mount);
+
trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
-1, _RET_IP_);
}
@@ -1017,7 +1010,13 @@ xfs_reclaim_inodes(
}
/*
- * Shrinker infrastructure.
+ * Inode cache shrinker.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we will throttle the speed of reclaim via doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
*/
static int
xfs_reclaim_inode_shrink(
@@ -1032,10 +1031,15 @@ xfs_reclaim_inode_shrink(
mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
if (nr_to_scan) {
+ /* kick background reclaimer and push the AIL */
+ xfs_syncd_queue_reclaim(mp);
+ xfs_ail_push_all(mp->m_ail);
+
if (!(gfp_mask & __GFP_FS))
return -1;
- xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
+ xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
+ &nr_to_scan);
/* terminate if we don't exhaust the scan */
if (nr_to_scan > 0)
return -1;
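
The xfssyncd rework above is the kthread-to-workqueue conversion pattern: each periodic job becomes a delayed_work item that does one pass and requeues itself, and teardown shrinks to cancel_delayed_work_sync(), which also copes with a work item trying to rearm while it is being cancelled. A minimal sketch of that shape under hypothetical names (periodic_wq, struct demo_mount):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *periodic_wq;	/* hypothetical shared workqueue */

struct demo_mount {
	struct delayed_work sync_work;
};

static void demo_queue_sync(struct demo_mount *m)
{
	queue_delayed_work(periodic_wq, &m->sync_work, msecs_to_jiffies(30000));
}

static void demo_sync_worker(struct work_struct *work)
{
	struct demo_mount *m = container_of(to_delayed_work(work),
					    struct demo_mount, sync_work);

	/* ... one periodic pass over *m ... */

	demo_queue_sync(m);		/* rearm: the work item owns its own cadence */
}

static void demo_sync_start(struct demo_mount *m)
{
	INIT_DELAYED_WORK(&m->sync_work, demo_sync_worker);
	demo_queue_sync(m);
}

static void demo_sync_stop(struct demo_mount *m)
{
	cancel_delayed_work_sync(&m->sync_work);	/* waits out a running pass */
}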
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index 32ba6628290c..e3a6ad27415f 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -32,6 +32,8 @@ typedef struct xfs_sync_work {
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
+extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
int xfs_syncd_init(struct xfs_mount *mp);
void xfs_syncd_stop(struct xfs_mount *mp);
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 254ee062bd7d..69228aa8605a 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -461,12 +461,10 @@ xfs_qm_dqflush_all(
struct xfs_quotainfo *q = mp->m_quotainfo;
int recl;
struct xfs_dquot *dqp;
- int niters;
int error;
if (!q)
return 0;
- niters = 0;
again:
mutex_lock(&q->qi_dqlist_lock);
list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
@@ -1314,14 +1312,9 @@ xfs_qm_dqiter_bufs(
{
xfs_buf_t *bp;
int error;
- int notcommitted;
- int incr;
int type;
ASSERT(blkcnt > 0);
- notcommitted = 0;
- incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
- XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
error = 0;
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index c9446f1c726d..567b29b9f1b3 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -65,11 +65,6 @@ extern kmem_zone_t *qm_dqtrxzone;
* block in the dquot/xqm code.
*/
#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
-/*
- * When doing a quotacheck, we log dquot clusters of this many FSBs at most
- * in a single transaction. We don't want to ask for too huge a log reservation.
- */
-#define XFS_QM_MAX_DQCLUSTER_LOGSZ 3
typedef xfs_dqhash_t xfs_dqlist_t;
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 0d62a07b7fd8..2dadb15d5ca9 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -313,14 +313,12 @@ xfs_qm_scall_quotaon(
{
int error;
uint qf;
- uint accflags;
__int64_t sbflags;
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
* Switching on quota accounting must be done at mount time.
*/
- accflags = flags & XFS_ALL_QUOTA_ACCT;
flags &= ~(XFS_ALL_QUOTA_ACCT);
sbflags = 0;
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 4bc3c649aee4..27d64d752eab 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2395,17 +2395,33 @@ xfs_free_extent(
memset(&args, 0, sizeof(xfs_alloc_arg_t));
args.tp = tp;
args.mp = tp->t_mountp;
+
+ /*
+ * validate that the block number is legal - this enables us to detect
+ * and handle a silent filesystem corruption rather than crashing.
+ */
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
- ASSERT(args.agno < args.mp->m_sb.sb_agcount);
+ if (args.agno >= args.mp->m_sb.sb_agcount)
+ return EFSCORRUPTED;
+
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
+ if (args.agbno >= args.mp->m_sb.sb_agblocks)
+ return EFSCORRUPTED;
+
args.pag = xfs_perag_get(args.mp, args.agno);
- if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
+ ASSERT(args.pag);
+
+ error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+ if (error)
goto error0;
-#ifdef DEBUG
- ASSERT(args.agbp != NULL);
- ASSERT((args.agbno + len) <=
- be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length));
-#endif
+
+ /* validate the extent size is legal now we have the agf locked */
+ if (args.agbno + len >
+ be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
+ error = EFSCORRUPTED;
+ goto error0;
+ }
+
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
xfs_perag_put(args.pag);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 46cc40131d4a..576fdfe81d60 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -198,6 +198,41 @@ xfs_inode_item_size(
}
/*
+ * xfs_inode_item_format_extents - convert in-core extents to on-disk form
+ *
+ * For either the data or attr fork in extent format, we need to endian convert
+ * the in-core extent as we place them into the on-disk inode. In this case, we
+ * need to do this conversion before we write the extents into the log. Because
+ * we don't have the disk inode to write into here, we allocate a buffer and
+ * format the extents into it via xfs_iextents_copy(). We free the buffer in
+ * the unlock routine after the copy for the log has been made.
+ *
+ * In the case of the data fork, the in-core and on-disk fork sizes can be
+ * different due to delayed allocation extents. We only log on-disk extents
+ * here, so always use the physical fork size to determine the size of the
+ * buffer we need to allocate.
+ */
+STATIC void
+xfs_inode_item_format_extents(
+ struct xfs_inode *ip,
+ struct xfs_log_iovec *vecp,
+ int whichfork,
+ int type)
+{
+ xfs_bmbt_rec_t *ext_buffer;
+
+ ext_buffer = kmem_alloc(XFS_IFORK_SIZE(ip, whichfork), KM_SLEEP);
+ if (whichfork == XFS_DATA_FORK)
+ ip->i_itemp->ili_extents_buf = ext_buffer;
+ else
+ ip->i_itemp->ili_aextents_buf = ext_buffer;
+
+ vecp->i_addr = ext_buffer;
+ vecp->i_len = xfs_iextents_copy(ip, ext_buffer, whichfork);
+ vecp->i_type = type;
+}
+
+/*
* This is called to fill in the vector of log iovecs for the
* given inode log item. It fills the first item with an inode
* log format structure, the second with the on-disk inode structure,
@@ -213,7 +248,6 @@ xfs_inode_item_format(
struct xfs_inode *ip = iip->ili_inode;
uint nvecs;
size_t data_bytes;
- xfs_bmbt_rec_t *ext_buffer;
xfs_mount_t *mp;
vecp->i_addr = &iip->ili_format;
@@ -320,22 +354,8 @@ xfs_inode_item_format(
} else
#endif
{
- /*
- * There are delayed allocation extents
- * in the inode, or we need to convert
- * the extents to on disk format.
- * Use xfs_iextents_copy()
- * to copy only the real extents into
- * a separate buffer. We'll free the
- * buffer in the unlock routine.
- */
- ext_buffer = kmem_alloc(ip->i_df.if_bytes,
- KM_SLEEP);
- iip->ili_extents_buf = ext_buffer;
- vecp->i_addr = ext_buffer;
- vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_DATA_FORK);
- vecp->i_type = XLOG_REG_TYPE_IEXT;
+ xfs_inode_item_format_extents(ip, vecp,
+ XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
}
ASSERT(vecp->i_len <= ip->i_df.if_bytes);
iip->ili_format.ilf_dsize = vecp->i_len;
@@ -445,19 +465,12 @@ xfs_inode_item_format(
*/
vecp->i_addr = ip->i_afp->if_u1.if_extents;
vecp->i_len = ip->i_afp->if_bytes;
+ vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
ASSERT(iip->ili_aextents_buf == NULL);
- /*
- * Need to endian flip before logging
- */
- ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
- KM_SLEEP);
- iip->ili_aextents_buf = ext_buffer;
- vecp->i_addr = ext_buffer;
- vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
- XFS_ATTR_FORK);
+ xfs_inode_item_format_extents(ip, vecp,
+ XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
- vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
iip->ili_format.ilf_asize = vecp->i_len;
vecp++;
nvecs++;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index dc1882adaf54..751e94fe1f77 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -204,7 +204,6 @@ xfs_bulkstat(
xfs_agi_t *agi; /* agi header data */
xfs_agino_t agino; /* inode # in allocation group */
xfs_agnumber_t agno; /* allocation group number */
- xfs_daddr_t bno; /* inode cluster start daddr */
int chunkidx; /* current index into inode chunk */
int clustidx; /* current index into inode cluster */
xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
@@ -463,7 +462,6 @@ xfs_bulkstat(
mp->m_sb.sb_inopblog);
}
ino = XFS_AGINO_TO_INO(mp, agno, agino);
- bno = XFS_AGB_TO_DADDR(mp, agno, agbno);
/*
* Skip if this inode is free.
*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 25efa9b8a602..b612ce4520ae 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
break;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
- if (!xfs_trans_ail_tail(log->l_ailp) &&
+ if (!xfs_ail_min_lsn(log->l_ailp) &&
xlog_iclogs_empty(log)) {
if (log->l_covered_state == XLOG_STATE_COVER_NEED)
log->l_covered_state = XLOG_STATE_COVER_DONE;
@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
xfs_lsn_t tail_lsn;
struct log *log = mp->m_log;
- tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+ tail_lsn = xfs_ail_min_lsn(mp->m_ail);
if (!tail_lsn)
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
* the filesystem is shutting down.
*/
if (!XLOG_FORCED_SHUTDOWN(log))
- xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+ xfs_ail_push(log->l_ailp, threshold_lsn);
}
/*
@@ -3407,6 +3407,17 @@ xlog_verify_dest_ptr(
xfs_emerg(log->l_mp, "%s: invalid ptr", __func__);
}
+/*
+ * Check to make sure the grant write head didn't just overlap the tail. If
+ * the cycles are the same, we can't be overlapping. Otherwise, make sure that
+ * the cycles differ by exactly one and check the byte count.
+ *
+ * This check is run unlocked, so can give false positives. Rather than assert
+ * on failures, use a warn-once flag and a panic tag to allow the admin to
+ * determine if they want to panic the machine when such an error occurs. For
+ * debug kernels this will have the same effect as using an assert but, unlike
+ * an assert, it can be turned off at runtime.
+ */
STATIC void
xlog_verify_grant_tail(
struct log *log)
@@ -3414,17 +3425,22 @@ xlog_verify_grant_tail(
int tail_cycle, tail_blocks;
int cycle, space;
- /*
- * Check to make sure the grant write head didn't just over lap the
- * tail. If the cycles are the same, we can't be overlapping.
- * Otherwise, make sure that the cycles differ by exactly one and
- * check the byte count.
- */
xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
if (tail_cycle != cycle) {
- ASSERT(cycle - 1 == tail_cycle);
- ASSERT(space <= BBTOB(tail_blocks));
+ if (cycle - 1 != tail_cycle &&
+ !(log->l_flags & XLOG_TAIL_WARN)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+ "%s: cycle - 1 != tail_cycle", __func__);
+ log->l_flags |= XLOG_TAIL_WARN;
+ }
+
+ if (space > BBTOB(tail_blocks) &&
+ !(log->l_flags & XLOG_TAIL_WARN)) {
+ xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
+ "%s: space > BBTOB(tail_blocks)", __func__);
+ log->l_flags |= XLOG_TAIL_WARN;
+ }
}
}
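
The rewritten xlog_verify_grant_tail() above trades asserts for a warn-once scheme: the check runs unlocked and can misfire, so the first violation emits an alert (under a panic tag the admin can escalate) and sets a flag bit that suppresses repeats. A minimal sketch of the warn-once-flag idea, with hypothetical names:

#include <linux/kernel.h>

#define DEMO_TAIL_WARNED 0x10		/* hypothetical "already warned" flag bit */

struct demo_log {
	unsigned int flags;
};

static void demo_verify_tail(struct demo_log *log, int cycle, int tail_cycle)
{
	/* Unlocked check: tolerate false positives, but only complain once. */
	if (cycle - 1 != tail_cycle && !(log->flags & DEMO_TAIL_WARNED)) {
		pr_alert("%s: cycle - 1 != tail_cycle\n", __func__);
		log->flags |= DEMO_TAIL_WARNED;
	}
}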
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ffae692c9832..5864850e9e34 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -144,6 +144,7 @@ static inline uint xlog_get_client_id(__be32 i)
#define XLOG_RECOVERY_NEEDED 0x4 /* log was recovered */
#define XLOG_IO_ERROR 0x8 /* log hit an I/O error, and being
shutdown */
+#define XLOG_TAIL_WARN 0x10 /* log tail verify warning issued */
#ifdef __KERNEL__
/*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a62e8971539d..19af0ab0d0c6 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -203,12 +203,9 @@ typedef struct xfs_mount {
struct mutex m_icsb_mutex; /* balancer sync lock */
#endif
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
- struct task_struct *m_sync_task; /* generalised sync thread */
- xfs_sync_work_t m_sync_work; /* work item for VFS_SYNC */
- struct list_head m_sync_list; /* sync thread work item list */
- spinlock_t m_sync_lock; /* work item list lock */
- int m_sync_seq; /* sync thread generation no. */
- wait_queue_head_t m_wait_single_sync_task;
+ struct delayed_work m_sync_work; /* background sync work */
+ struct delayed_work m_reclaim_work; /* background inode reclaim */
+ struct work_struct m_flush_work; /* background inode flush */
__int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */
struct shrinker m_inode_shrink; /* inode reclaim shrinker */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 12aff9584e29..acdb92f14d51 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,74 +28,138 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
-STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
-STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
-STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
+struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
#ifdef DEBUG
-STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *);
-#else
+/*
+ * Check that the list is sorted as it should be.
+ */
+STATIC void
+xfs_ail_check(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ xfs_log_item_t *prev_lip;
+
+ if (list_empty(&ailp->xa_ail))
+ return;
+
+ /*
+ * Check the next and previous entries are valid.
+ */
+ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+ prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+
+ prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
+
+
+#ifdef XFS_TRANS_DEBUG
+ /*
+ * Walk the list checking lsn ordering, and that every entry has the
+ * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
+ * when specifically debugging the transaction subsystem.
+ */
+ prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+ list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
+ if (&prev_lip->li_ail != &ailp->xa_ail)
+ ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
+ ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
+ prev_lip = lip;
+ }
+#endif /* XFS_TRANS_DEBUG */
+}
+#else /* !DEBUG */
#define xfs_ail_check(a,l)
#endif /* DEBUG */
+/*
+ * Return a pointer to the first item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_min(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
+ return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
+}
+
+ /*
+ * Return a pointer to the last item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
+ return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
+/*
+ * Return a pointer to the item which follows the given item in the AIL. If
+ * the given item is the last item in the list, then return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_next(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ if (lip->li_ail.next == &ailp->xa_ail)
+ return NULL;
+
+ return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
+}
/*
- * This is called by the log manager code to determine the LSN
- * of the tail of the log. This is exactly the LSN of the first
- * item in the AIL. If the AIL is empty, then this function
- * returns 0.
+ * This is called by the log manager code to determine the LSN of the tail of
+ * the log. This is exactly the LSN of the first item in the AIL. If the AIL
+ * is empty, then this function returns 0.
*
- * We need the AIL lock in order to get a coherent read of the
- * lsn of the last item in the AIL.
+ * We need the AIL lock in order to get a coherent read of the lsn of the last
+ * item in the AIL.
*/
xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
struct xfs_ail *ailp)
{
- xfs_lsn_t lsn;
+ xfs_lsn_t lsn = 0;
xfs_log_item_t *lip;
spin_lock(&ailp->xa_lock);
lip = xfs_ail_min(ailp);
- if (lip == NULL) {
- lsn = (xfs_lsn_t)0;
- } else {
+ if (lip)
lsn = lip->li_lsn;
- }
spin_unlock(&ailp->xa_lock);
return lsn;
}
/*
- * xfs_trans_push_ail
- *
- * This routine is called to move the tail of the AIL forward. It does this by
- * trying to flush items in the AIL whose lsns are below the given
- * threshold_lsn.
- *
- * the push is run asynchronously in a separate thread, so we return the tail
- * of the log right now instead of the tail after the push. This means we will
- * either continue right away, or we will sleep waiting on the async thread to
- * do its work.
- *
- * We do this unlocked - we only need to know whether there is anything in the
- * AIL at the time we are called. We don't need to access the contents of
- * any of the objects, so the lock is not needed.
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
*/
-void
-xfs_trans_ail_push(
- struct xfs_ail *ailp,
- xfs_lsn_t threshold_lsn)
+static xfs_lsn_t
+xfs_ail_max_lsn(
+ struct xfs_ail *ailp)
{
- xfs_log_item_t *lip;
+ xfs_lsn_t lsn = 0;
+ xfs_log_item_t *lip;
- lip = xfs_ail_min(ailp);
- if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) {
- if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0)
- xfsaild_wakeup(ailp, threshold_lsn);
- }
+ spin_lock(&ailp->xa_lock);
+ lip = xfs_ail_max(ailp);
+ if (lip)
+ lsn = lip->li_lsn;
+ spin_unlock(&ailp->xa_lock);
+
+ return lsn;
}
/*
@@ -236,16 +300,57 @@ out:
}
/*
- * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
- * zero indicates that the caller should sleep until woken.
+ * splice the log item list into the AIL at the given LSN.
*/
-long
-xfsaild_push(
- struct xfs_ail *ailp,
- xfs_lsn_t *last_lsn)
+static void
+xfs_ail_splice(
+ struct xfs_ail *ailp,
+ struct list_head *list,
+ xfs_lsn_t lsn)
{
- long tout = 0;
- xfs_lsn_t last_pushed_lsn = *last_lsn;
+ xfs_log_item_t *next_lip;
+
+ /* If the list is empty, just insert the item. */
+ if (list_empty(&ailp->xa_ail)) {
+ list_splice(list, &ailp->xa_ail);
+ return;
+ }
+
+ list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+ if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+ break;
+ }
+
+ ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+ XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
+
+ list_splice_init(list, &next_lip->li_ail);
+}
+
+/*
+ * Delete the given item from the AIL.
+ */
+static void
+xfs_ail_delete(
+ struct xfs_ail *ailp,
+ xfs_log_item_t *lip)
+{
+ xfs_ail_check(ailp, lip);
+ list_del(&lip->li_ail);
+ xfs_trans_ail_cursor_clear(ailp, lip);
+}
+
+/*
+ * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
+ * to run at a later time if there is more work to do to complete the push.
+ */
+STATIC void
+xfs_ail_worker(
+ struct work_struct *work)
+{
+ struct xfs_ail *ailp = container_of(to_delayed_work(work),
+ struct xfs_ail, xa_work);
+ long tout;
xfs_lsn_t target = ailp->xa_target;
xfs_lsn_t lsn;
xfs_log_item_t *lip;
@@ -256,15 +361,15 @@ xfsaild_push(
spin_lock(&ailp->xa_lock);
xfs_trans_ail_cursor_init(ailp, cur);
- lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn);
+ lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
/*
* AIL is empty or our push has reached the end.
*/
xfs_trans_ail_cursor_done(ailp, cur);
spin_unlock(&ailp->xa_lock);
- *last_lsn = 0;
- return tout;
+ ailp->xa_last_pushed_lsn = 0;
+ return;
}
XFS_STATS_INC(xs_push_ail);
@@ -301,13 +406,13 @@ xfsaild_push(
case XFS_ITEM_SUCCESS:
XFS_STATS_INC(xs_push_ail_success);
IOP_PUSH(lip);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
break;
case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
push_xfsbufd = 1;
break;
@@ -319,7 +424,7 @@ xfsaild_push(
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
- last_pushed_lsn = lsn;
+ ailp->xa_last_pushed_lsn = lsn;
stuck++;
break;
@@ -374,9 +479,23 @@ xfsaild_push(
wake_up_process(mp->m_ddev_targp->bt_task);
}
+ /* assume we have more work to do in a short while */
+ tout = 10;
if (!count) {
/* We're past our target or empty, so idle */
- last_pushed_lsn = 0;
+ ailp->xa_last_pushed_lsn = 0;
+
+ /*
+ * Check for an updated push target before clearing the
+ * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
+ * work to do. Wait a bit longer before starting that work.
+ */
+ smp_rmb();
+ if (ailp->xa_target == target) {
+ clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+ return;
+ }
+ tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
* We reached the target so wait a bit longer for I/O to
@@ -384,7 +503,7 @@ xfsaild_push(
* start the next scan from the start of the AIL.
*/
tout = 50;
- last_pushed_lsn = 0;
+ ailp->xa_last_pushed_lsn = 0;
} else if ((stuck * 100) / count > 90) {
/*
* Either there is a lot of contention on the AIL or we
@@ -396,14 +515,61 @@ xfsaild_push(
* continuing from where we were.
*/
tout = 20;
- } else {
- /* more to do, but wait a short while before continuing */
- tout = 10;
}
- *last_lsn = last_pushed_lsn;
- return tout;
+
+ /* There is more to do, requeue us. */
+ queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
+ msecs_to_jiffies(tout));
+}
+
+/*
+ * This routine is called to move the tail of the AIL forward. It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
+ *
+ * The push is run asynchronously in a workqueue, which means the caller needs
+ * to handle waiting on the async flush for space to become available.
+ * We don't want to interrupt any push that is in progress, hence we only
+ * queue work if we set the pushing bit appropriately.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called. We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
+ */
+void
+xfs_ail_push(
+ struct xfs_ail *ailp,
+ xfs_lsn_t threshold_lsn)
+{
+ xfs_log_item_t *lip;
+
+ lip = xfs_ail_min(ailp);
+ if (!lip || XFS_FORCED_SHUTDOWN(ailp->xa_mount) ||
+ XFS_LSN_CMP(threshold_lsn, ailp->xa_target) <= 0)
+ return;
+
+ /*
+ * Ensure that the new target is noticed in push code before it clears
+ * the XFS_AIL_PUSHING_BIT.
+ */
+ smp_wmb();
+ ailp->xa_target = threshold_lsn;
+ if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+ queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}
+/*
+ * Push out all items in the AIL immediately.
+ */
+void
+xfs_ail_push_all(
+ struct xfs_ail *ailp)
+{
+ xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
+
+ if (threshold_lsn)
+ xfs_ail_push(ailp, threshold_lsn);
+}
/*
* This is to be called when an item is unlocked that may have
@@ -615,7 +781,6 @@ xfs_trans_ail_init(
xfs_mount_t *mp)
{
struct xfs_ail *ailp;
- int error;
ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
if (!ailp)
@@ -624,15 +789,9 @@ xfs_trans_ail_init(
ailp->xa_mount = mp;
INIT_LIST_HEAD(&ailp->xa_ail);
spin_lock_init(&ailp->xa_lock);
- error = xfsaild_start(ailp);
- if (error)
- goto out_free_ailp;
+ INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
mp->m_ail = ailp;
return 0;
-
-out_free_ailp:
- kmem_free(ailp);
- return error;
}
void
@@ -641,124 +800,6 @@ xfs_trans_ail_destroy(
{
struct xfs_ail *ailp = mp->m_ail;
- xfsaild_stop(ailp);
+ cancel_delayed_work_sync(&ailp->xa_work);
kmem_free(ailp);
}
-
-/*
- * splice the log item list into the AIL at the given LSN.
- */
-STATIC void
-xfs_ail_splice(
- struct xfs_ail *ailp,
- struct list_head *list,
- xfs_lsn_t lsn)
-{
- xfs_log_item_t *next_lip;
-
- /*
- * If the list is empty, just insert the item.
- */
- if (list_empty(&ailp->xa_ail)) {
- list_splice(list, &ailp->xa_ail);
- return;
- }
-
- list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
- if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
- break;
- }
-
- ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
- (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
-
- list_splice_init(list, &next_lip->li_ail);
- return;
-}
-
-/*
- * Delete the given item from the AIL. Return a pointer to the item.
- */
-STATIC void
-xfs_ail_delete(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- xfs_ail_check(ailp, lip);
- list_del(&lip->li_ail);
- xfs_trans_ail_cursor_clear(ailp, lip);
-}
-
-/*
- * Return a pointer to the first item in the AIL.
- * If the AIL is empty, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_min(
- struct xfs_ail *ailp)
-{
- if (list_empty(&ailp->xa_ail))
- return NULL;
-
- return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-}
-
-/*
- * Return a pointer to the item which follows
- * the given item in the AIL. If the given item
- * is the last item in the list, then return NULL.
- */
-STATIC xfs_log_item_t *
-xfs_ail_next(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- if (lip->li_ail.next == &ailp->xa_ail)
- return NULL;
-
- return list_first_entry(&lip->li_ail, xfs_log_item_t, li_ail);
-}
-
-#ifdef DEBUG
-/*
- * Check that the list is sorted as it should be.
- */
-STATIC void
-xfs_ail_check(
- struct xfs_ail *ailp,
- xfs_log_item_t *lip)
-{
- xfs_log_item_t *prev_lip;
-
- if (list_empty(&ailp->xa_ail))
- return;
-
- /*
- * Check the next and previous entries are valid.
- */
- ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
- prev_lip = list_entry(lip->li_ail.prev, xfs_log_item_t, li_ail);
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-
- prev_lip = list_entry(lip->li_ail.next, xfs_log_item_t, li_ail);
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);
-
-
-#ifdef XFS_TRANS_DEBUG
- /*
- * Walk the list checking lsn ordering, and that every entry has the
- * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
- * when specifically debugging the transaction subsystem.
- */
- prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
- list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
- if (&prev_lip->li_ail != &ailp->xa_ail)
- ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
- ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
- prev_lip = lip;
- }
-#endif /* XFS_TRANS_DEBUG */
-}
-#endif /* DEBUG */
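An illustrative sketch (editor's aside, not part of the patch; the helper names are hypothetical and the logic is condensed from xfs_ail_push() and xfs_ail_worker() above) of the handshake that replaces the xfsaild thread. The wmb/rmb pair around xa_target and XFS_AIL_PUSHING_BIT ensures that a target update issued while the worker is winding down is never lost:

	/* Producer: record the new target and kick the worker if it is idle. */
	void push_side(struct xfs_ail *ailp, xfs_lsn_t threshold_lsn)
	{
		smp_wmb();			/* pairs with the worker's smp_rmb() */
		ailp->xa_target = threshold_lsn;
		if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
			queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
	}

	/* Consumer: called once the AIL has been pushed past the old target. */
	void worker_idle_or_requeue(struct xfs_ail *ailp, xfs_lsn_t target)
	{
		smp_rmb();			/* pick up any concurrent target update */
		if (ailp->xa_target == target) {
			clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
			return;			/* idle until the next xfs_ail_push() */
		}
		queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, msecs_to_jiffies(50));
	}

Clearing the bit before re-checking xa_target would open a window in which a concurrent push is dropped; checking first, as above, closes that race.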
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35162c238fa3..6b164e9e9a1f 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -65,16 +65,22 @@ struct xfs_ail_cursor {
struct xfs_ail {
struct xfs_mount *xa_mount;
struct list_head xa_ail;
- uint xa_gen;
- struct task_struct *xa_task;
xfs_lsn_t xa_target;
struct xfs_ail_cursor xa_cursors;
spinlock_t xa_lock;
+ struct delayed_work xa_work;
+ xfs_lsn_t xa_last_pushed_lsn;
+ unsigned long xa_flags;
};
+#define XFS_AIL_PUSHING_BIT 0
+
/*
* From xfs_trans_ail.c
*/
+
+extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
+
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_log_item **log_items, int nr_items,
xfs_lsn_t lsn) __releases(ailp->xa_lock);
@@ -98,12 +104,13 @@ xfs_trans_ail_delete(
xfs_trans_ail_delete_bulk(ailp, &lip, 1);
}
-void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
+
void xfs_trans_unlocked_item(struct xfs_ail *,
xfs_log_item_t *);
-xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp);
-
struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn);
@@ -112,11 +119,6 @@ struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur);
-long xfsaild_push(struct xfs_ail *, xfs_lsn_t *);
-void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t);
-int xfsaild_start(struct xfs_ail *);
-void xfsaild_stop(struct xfs_ail *);
-
#if BITS_PER_LONG != 64
static inline void
xfs_trans_ail_copy_lsn(
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index e612575a2596..b4326bfa684f 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -23,11 +23,11 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
- while (test_bit(bitnum, addr)) {
- preempt_enable();
+ preempt_enable();
+ do {
cpu_relax();
- preempt_disable();
- }
+ } while (test_bit(bitnum, addr));
+ preempt_disable();
}
#endif
__acquire(bitlock);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32176cc8e715..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
-#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -697,8 +696,9 @@ extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *);
struct blk_plug {
unsigned long magic;
struct list_head list;
+ struct list_head cb_list;
unsigned int should_sort;
};
+struct blk_plug_cb {
+ struct list_head list;
+ void (*callback)(struct blk_plug_cb *);
+};
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
-extern void __blk_flush_plug(struct task_struct *, struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
static inline void blk_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
- if (unlikely(plug))
- __blk_flush_plug(tsk, plug);
+ if (plug)
+ blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+ struct blk_plug *plug = tsk->plug;
+
+ if (plug)
+ blk_flush_plug_list(plug, true);
}
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
- return plug && !list_empty(&plug->list);
+ return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}
/*
@@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task)
{
}
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
return false;
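A minimal usage sketch of the per-task plug (hypothetical caller, not from this patch). blk_finish_plug() flushes synchronously via blk_flush_plug_list(plug, false); if the task instead goes to sleep with requests still plugged, the scheduler now calls blk_schedule_flush_plug(), i.e. blk_flush_plug_list(plug, true), so the pending I/O is kicked off asynchronously rather than deadlocking behind the sleeping submitter:

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	static void example_submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);			/* arm the per-task plug */
		for (i = 0; i < nr; i++)
			submit_bio(READ, bios[i]);	/* held back on plug->list */
		blk_finish_plug(&plug);			/* explicit, synchronous flush */
	}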
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 8e20540043f5..089fe43211a4 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -12,6 +12,7 @@
/**
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
+ * @irq_flags: - IRQF configuration flags
* @board_specific_setup: - called before probing the chip (power,reset)
* @transceiver_enable: - called to power on/off the transceiver
* @power_enable: - called to power on/off the mcp *and* the
@@ -24,6 +25,7 @@
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
+ unsigned long irq_flags;
int (*board_specific_setup)(struct spi_device *spi);
int (*transceiver_enable)(int enable);
int (*power_enable) (int enable);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f2afed4fa945..19d90a55541d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -197,7 +197,7 @@ struct dentry_operations {
* typically using d_splice_alias. */
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
-#define DCACHE_UNHASHED 0x0010
+#define DCACHE_RCUACCESS 0x0010 /* Entry has ever been RCU-visible */
#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020
/* Parent inode is watched by inotify */
@@ -384,7 +384,7 @@ extern struct dentry *dget_parent(struct dentry *dentry);
static inline int d_unhashed(struct dentry *dentry)
{
- return (dentry->d_flags & DCACHE_UNHASHED);
+ return hlist_bl_unhashed(&dentry->d_hash);
}
static inline int d_unlinked(struct dentry *dentry)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e2768834f397..32a4423710f5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,7 +197,6 @@ struct dm_target {
struct dm_target_callbacks {
struct list_head list;
int (*congested_fn) (struct dm_target_callbacks *, int);
- void (*unplug_fn)(struct dm_target_callbacks *);
};
int dm_register_target(struct target_type *t);
diff --git a/include/linux/input.h b/include/linux/input.h
index f3a7794a18c4..771d6d85667d 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -167,6 +167,7 @@ struct input_keymap_entry {
#define SYN_REPORT 0
#define SYN_CONFIG 1
#define SYN_MT_REPORT 2
+#define SYN_DROPPED 3
/*
* Keys and buttons
@@ -553,8 +554,8 @@ struct input_keymap_entry {
#define KEY_DVD 0x185 /* Media Select DVD */
#define KEY_AUX 0x186
#define KEY_MP3 0x187
-#define KEY_AUDIO 0x188
-#define KEY_VIDEO 0x189
+#define KEY_AUDIO 0x188 /* AL Audio Browser */
+#define KEY_VIDEO 0x189 /* AL Movie Browser */
#define KEY_DIRECTORY 0x18a
#define KEY_LIST 0x18b
#define KEY_MEMO 0x18c /* Media Select Messages */
@@ -603,8 +604,9 @@ struct input_keymap_entry {
#define KEY_FRAMEFORWARD 0x1b5
#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */
#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */
-#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
-#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
+#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
+#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
+#define KEY_IMAGES 0x1ba /* AL Image Browser */
#define KEY_DEL_EOL 0x1c0
#define KEY_DEL_EOS 0x1c1
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index b3ac06a4435d..318bb82325a6 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot)
input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
}
+static inline bool input_is_mt_axis(int axis)
+{
+ return axis == ABS_MT_SLOT ||
+ (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST);
+}
+
void input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 7f675aa81d87..04f32a3eb26b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -137,8 +137,6 @@ enum {
ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
- ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */
- ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
@@ -198,6 +196,7 @@ enum {
* management */
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
* led */
+ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 5bad17d1acde..31f9d75adc5b 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,6 +2,7 @@
#define _LINUX_LIST_BL_H
#include <linux/list.h>
+#include <linux/bit_spinlock.h>
/*
* Special version of lists, where head of the list has a lock in the lowest
@@ -114,6 +115,16 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
}
}
+static inline void hlist_bl_lock(struct hlist_bl_head *b)
+{
+ bit_spin_lock(0, (unsigned long *)b);
+}
+
+static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+{
+ __bit_spin_unlock(0, (unsigned long *)b);
+}
+
/**
* hlist_bl_for_each_entry - iterate over list of given type
* @tpos: the type * to use as a loop cursor.
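A short usage sketch for the new lock helpers (the item structure is hypothetical). Bit 0 of the hlist_bl_head pointer doubles as the lock, which is why list_bl.h now needs bit_spinlock.h and why entries must be at least two-byte aligned:

	#include <linux/list_bl.h>

	struct bl_item {
		struct hlist_bl_node	node;
		int			key;
	};

	static void example_bl_insert(struct hlist_bl_head *head, struct bl_item *it)
	{
		hlist_bl_lock(head);		/* bit_spin_lock(0, (unsigned long *)head) */
		hlist_bl_add_head(&it->node, head);
		hlist_bl_unlock(head);		/* __bit_spin_unlock(0, (unsigned long *)head) */
	}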
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 5a5ce7055839..5e9840f50980 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -216,7 +216,7 @@ static inline void mem_cgroup_del_lru_list(struct page *page, int lru)
return ;
}
-static inline inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
+static inline void mem_cgroup_rotate_reclaimable_page(struct page *page)
{
return ;
}
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index ad1b19aa6508..aef23309a742 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -86,16 +86,25 @@ extern int mfd_clone_cell(const char *cell, const char **clones,
*/
static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
{
- return pdev->dev.platform_data;
+ return pdev->mfd_cell;
}
/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the .mfd_data entry from the mfd_cell that created it.
+ * Otherwise just return the platform_data pointer.
+ * This maintains compatibility with platform drivers whose devices aren't
+ * created by the mfd layer and which expect platform_data to contain what
+ * would otherwise have been in mfd_data.
*/
static inline void *mfd_get_data(struct platform_device *pdev)
{
- return mfd_get_cell(pdev)->mfd_data;
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+
+ if (cell)
+ return cell->mfd_data;
+ else
+ return pdev->dev.platform_data;
}
extern int mfd_add_devices(struct device *parent, int id,
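A hedged sketch of the intended caller pattern (driver and pdata type are hypothetical): because mfd_get_data() now falls back to dev.platform_data when no mfd_cell is attached, the same probe works for devices created by mfd_add_devices() and for plain platform devices:

	#include <linux/platform_device.h>
	#include <linux/mfd/core.h>

	struct example_pdata {			/* hypothetical platform data */
		int	irq;
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_pdata *pdata = mfd_get_data(pdev);

		if (!pdata)
			return -EINVAL;		/* neither mfd_data nor platform_data */
		/* ... use pdata->irq ... */
		return 0;
	}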
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index eeec00abb664..7fa95df60146 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -270,7 +270,8 @@ struct nf_afinfo {
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
- int (*route)(struct dst_entry **dst, struct flowi *fl);
+ int (*route)(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
int (*reroute)(struct sk_buff *skb,
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index ec333d83f3b4..5a262e3ae715 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -293,7 +293,7 @@ struct ip_set {
/* Lock protecting the set data */
rwlock_t lock;
/* References to the set */
- atomic_t ref;
+ u32 ref;
/* The core set type */
struct ip_set_type *type;
/* The type variant doing the real job */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index ec9d9bea1e37..a0196ac79051 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -515,8 +515,7 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
if (h->netmask != HOST_MASK)
NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
#endif
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
if (with_timeout(h->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 31afb7ecbe1f..cdced84261d7 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr);
*/
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-int next_pidmap(struct pid_namespace *pid_ns, int last);
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index d96db9825708..744942c95fec 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -14,6 +14,8 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+struct mfd_cell;
+
struct platform_device {
const char * name;
int id;
@@ -23,6 +25,9 @@ struct platform_device {
const struct platform_device_id *id_entry;
+ /* MFD cell pointer */
+ struct mfd_cell *mfd_cell;
+
/* arch specific additions */
struct pdev_archdata archdata;
};
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 369e19d3750b..7f1183dcd119 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -24,6 +24,7 @@
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/posix-timers.h>
+#include <linux/rwsem.h>
struct posix_clock;
@@ -104,7 +105,7 @@ struct posix_clock_operations {
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
* @kref: Reference count.
- * @mutex: Protects the 'zombie' field from concurrent access.
+ * @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
* @release: A function to free the structure when the reference count reaches
* zero. May be NULL if structure is statically allocated.
@@ -117,7 +118,7 @@ struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
struct kref kref;
- struct mutex mutex;
+ struct rw_semaphore rwsem;
bool zombie;
void (*release)(struct posix_clock *clk);
};
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 4e37a7cfa726..4d50611112ba 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -396,7 +396,7 @@ union rio_pw_msg {
};
/* Architecture and hardware-specific functions */
-extern void rio_register_mport(struct rio_mport *);
+extern int rio_register_mport(struct rio_mport *);
extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
extern void rio_close_inb_mbox(struct rio_mport *, int);
extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 7410d3365e2a..0cee0152aca9 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -35,6 +35,7 @@
#define RIO_DID_IDTCPS6Q 0x035f
#define RIO_DID_IDTCPS10Q 0x035e
#define RIO_DID_IDTCPS1848 0x0374
+#define RIO_DID_IDTCPS1432 0x0375
#define RIO_DID_IDTCPS1616 0x0379
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 2ca7e8a78060..877ece45426f 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
extern int rtc_set_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
+extern int rtc_initialize_alarm(struct rtc_device *rtc,
+ struct rtc_wkalrm *alrm);
extern void rtc_update_irq(struct rtc_device *rtc,
unsigned long num, unsigned long events);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ec2c027e92c..18d63cea2848 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1254,6 +1254,9 @@ struct task_struct {
#endif
struct mm_struct *mm, *active_mm;
+#ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+#endif
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index ca02f1716736..8ce59ef3e5af 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1456,7 +1456,7 @@ struct security_operations {
struct inode *new_dir, struct dentry *new_dentry);
int (*inode_readlink) (struct dentry *dentry);
int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
- int (*inode_permission) (struct inode *inode, int mask);
+ int (*inode_permission) (struct inode *inode, int mask, unsigned flags);
int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
int (*inode_setxattr) (struct dentry *dentry, const char *name,
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a89e3612875..083ffea7ba18 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -249,6 +249,8 @@ extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
+static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
@@ -297,14 +299,7 @@ static inline bool pm_wakeup_pending(void) { return false; }
extern struct mutex pm_mutex;
-#ifndef CONFIG_HIBERNATION
-static inline void register_nosave_region(unsigned long b, unsigned long e)
-{
-}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e)
-{
-}
-
+#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 3c7329b8ea0e..0e1855079fbb 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -103,8 +103,8 @@ struct driver_info {
* Indicates to usbnet, that USB driver accumulates multiple IP packets.
* Affects statistic (counters) and short packet handling.
*/
-#define FLAG_MULTI_PACKET 0x1000
-#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */
+#define FLAG_MULTI_PACKET 0x2000
+#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 461c0119664f..2b3831b58aa4 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -58,6 +58,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
UNEVICTABLE_MLOCKFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ THP_FAULT_ALLOC,
+ THP_FAULT_FALLBACK,
+ THP_COLLAPSE_ALLOC,
+ THP_COLLAPSE_ALLOC_FAILED,
+ THP_SPLIT,
+#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index cdf2e8ac4309..d2df55b0c213 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -139,8 +139,6 @@ do { \
*/
enum p9_msg_t {
- P9_TSYNCFS = 0,
- P9_RSYNCFS,
P9_TLERROR = 6,
P9_RLERROR,
P9_TSTATFS = 8,
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 85c1413f054d..051a99f79769 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt);
void p9_client_begin_disconnect(struct p9_client *clnt);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
char *uname, u32 n_uname, char *aname);
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
- int clone);
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+ char **wnames, int clone);
int p9_client_open(struct p9_fid *fid, int mode);
int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
char *extension);
@@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
gid_t gid, struct p9_qid *qid);
int p9_client_clunk(struct p9_fid *fid);
int p9_client_fsync(struct p9_fid *fid, int datasync);
-int p9_client_sync_fs(struct p9_fid *fid);
int p9_client_remove(struct p9_fid *fid);
int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
u64 offset, u32 count);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 814b434db749..d516f00c8e0f 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -52,7 +52,7 @@ static inline struct net *skb_net(const struct sk_buff *skb)
*/
if (likely(skb->dev && skb->dev->nd_net))
return dev_net(skb->dev);
- if (skb_dst(skb)->dev)
+ if (skb_dst(skb) && skb_dst(skb)->dev)
return dev_net(skb_dst(skb)->dev);
WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
__func__, __LINE__);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cb13239fe8e3..025d4cc7bbf8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1753,8 +1753,19 @@ enum ieee80211_ampdu_mlme_action {
* that TX/RX_STOP can pass NULL for this parameter.
* The @buf_size parameter is only valid when the action is set to
* %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- * buffer size (number of subframes) for this session -- aggregates
- * containing more subframes than this may not be transmitted to the peer.
+ * buffer size (number of subframes) for this session -- the driver
+ * may neither send aggregates containing more subframes than this
+ * nor send aggregates in a way that lost frames would exceed the
+ * buffer size. If just limiting the aggregate size, this would be
+ * possible with a buf_size of 8:
+ * - TX: 1.....7
+ * - RX: 2....7 (lost frame #1)
+ * - TX: 8..1...
+ * which is invalid since #1 was now re-transmitted well past the
+ * buffer size of 8. Correct ways to retransmit #1 would be:
+ * - TX: 1 or 18 or 81
+ * Even "189" would be wrong since 1 could be lost again.
+ *
* Returns a negative error code on failure.
* The callback can sleep.
*
diff --git a/include/net/route.h b/include/net/route.h
index f88429cad52a..8fce0621cad1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -64,6 +64,7 @@ struct rtable {
__be32 rt_dst; /* Path destination */
__be32 rt_src; /* Path source */
+ int rt_route_iif;
int rt_iif;
int rt_oif;
__u32 rt_mark;
@@ -80,12 +81,12 @@ struct rtable {
static inline bool rt_is_input_route(struct rtable *rt)
{
- return rt->rt_iif != 0;
+ return rt->rt_route_iif != 0;
}
static inline bool rt_is_output_route(struct rtable *rt)
{
- return rt->rt_iif == 0;
+ return rt->rt_route_iif == 0;
}
struct ip_rt_acct {
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 78f18adb49c8..bf366547da25 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
DECLARE_EVENT_CLASS(block_unplug,
- TP_PROTO(struct request_queue *q),
+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
- TP_ARGS(q),
+ TP_ARGS(q, depth, explicit),
TP_STRUCT__entry(
__field( int, nr_rq )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
),
TP_fast_assign(
- __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+ __entry->nr_rq = depth;
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
),
@@ -419,31 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
);
/**
- * block_unplug_timer - timed release of operations requests in queue to device driver
- * @q: request queue to unplug
- *
- * Unplug the request queue @q because a timer expired and allow block
- * operation requests to be sent to the device driver.
- */
-DEFINE_EVENT(block_unplug, block_unplug_timer,
-
- TP_PROTO(struct request_queue *q),
-
- TP_ARGS(q)
-);
-
-/**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
* @q: request queue to unplug
+ * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
*
* Unplug request queue @q because device driver is scheduled to work
* on elements in the request queue.
*/
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
- TP_PROTO(struct request_queue *q),
+ TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
- TP_ARGS(q)
+ TP_ARGS(q, depth, explicit)
);
/**
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
- restart->futex.flags = flags;
+ restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
ret = -ERESTART_RESTARTBLOCK;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 55936f9cb251..87b77de03dd3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/kmsg_dump.h>
+#include <linux/syscore_ops.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1532,6 +1533,11 @@ int kernel_kexec(void)
local_irq_disable();
/* Suspend system devices */
error = sysdev_suspend(PMSG_FREEZE);
+ if (!error) {
+ error = syscore_suspend();
+ if (error)
+ sysdev_resume();
+ }
if (error)
goto Enable_irqs;
} else
@@ -1546,6 +1552,7 @@ int kernel_kexec(void)
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
+ syscore_resume();
sysdev_resume();
Enable_irqs:
local_irq_enable();
diff --git a/kernel/pid.c b/kernel/pid.c
index 02f221274265..57a8346a270e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
return -1;
}
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
int offset;
struct pidmap *map, *end;
+ if (last >= PID_MAX_LIMIT)
+ return -1;
+
offset = (last + 1) & BITS_PER_PAGE_MASK;
map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
end = &pid_ns->pidmap[PIDMAP_ENTRIES];
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4603f08dc47b..6de9a8fc3417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config HIBERNATE_CALLBACKS
+ bool
+
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+ select HIBERNATE_CALLBACKS
select LZO_COMPRESS
select LZO_DECOMPRESS
---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
config PM_SLEEP
def_bool y
- depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+ depends on SUSPEND || HIBERNATE_CALLBACKS
config PM_SLEEP_SMP
def_bool y
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index aeabd26e3342..50aae660174d 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -273,8 +273,11 @@ static int create_image(int platform_mode)
local_irq_disable();
error = sysdev_suspend(PMSG_FREEZE);
- if (!error)
+ if (!error) {
error = syscore_suspend();
+ if (error)
+ sysdev_resume();
+ }
if (error) {
printk(KERN_ERR "PM: Some system devices failed to power down, "
"aborting hibernation\n");
@@ -407,8 +410,11 @@ static int resume_target_kernel(bool platform_mode)
local_irq_disable();
error = sysdev_suspend(PMSG_QUIESCE);
- if (!error)
+ if (!error) {
error = syscore_suspend();
+ if (error)
+ sysdev_resume();
+ }
if (error)
goto Enable_irqs;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2814c32aed51..8935369d503a 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -164,8 +164,11 @@ static int suspend_enter(suspend_state_t state)
BUG_ON(!irqs_disabled());
error = sysdev_suspend(PMSG_SUSPEND);
- if (!error)
+ if (!error) {
error = syscore_suspend();
+ if (error)
+ sysdev_resume();
+ }
if (!error) {
if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
error = suspend_ops->enter(state);
diff --git a/kernel/sched.c b/kernel/sched.c
index 48013633d792..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4111,20 +4111,20 @@ need_resched:
try_to_wake_up_local(to_wakeup);
}
deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+ /*
+ * If we are going to sleep and we have plugged IO queued, make
+ * sure to submit it to avoid deadlocks.
+ */
+ if (blk_needs_flush_plug(prev)) {
+ raw_spin_unlock(&rq->lock);
+ blk_schedule_flush_plug(prev);
+ raw_spin_lock(&rq->lock);
+ }
}
switch_count = &prev->nvcsw;
}
- /*
- * If we are going to sleep and we have plugged IO queued, make
- * sure to submit it to avoid deadlocks.
- */
- if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
- raw_spin_unlock(&rq->lock);
- blk_flush_plug(prev);
- raw_spin_lock(&rq->lock);
- }
-
pre_schedule(rq, prev);
if (unlikely(!rq->nr_running))
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
enum cpu_idle_type idle, int *all_pinned,
int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
{
- int loops = 0, pulled = 0, pinned = 0;
+ int loops = 0, pulled = 0;
long rem_load_move = max_load_move;
struct task_struct *p, *n;
if (max_load_move == 0)
goto out;
- pinned = 1;
-
list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
if (loops++ > sysctl_sched_nr_migrate)
break;
if ((p->se.load.weight >> 1) > rem_load_move ||
- !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+ !can_migrate_task(p, busiest, this_cpu, sd, idle,
+ all_pinned))
continue;
pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
*/
schedstat_add(sd, lb_gained[idle], pulled);
- if (all_pinned)
- *all_pinned = pinned;
-
return max_load_move - rem_load_move;
}
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
if (!sds.busiest || sds.busiest_nr_running == 0)
goto out_balanced;
+ sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
/*
* If the busiest group is imbalanced the below checks don't
* work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* Don't pull any tasks if this group is already above the domain
* average load.
*/
- sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
if (sds.this_load >= sds.avg_load)
goto out_balanced;
@@ -3340,6 +3337,7 @@ redo:
* still unbalanced. ld_moved simply stays zero, so it is
* correctly treated as an imbalance.
*/
+ all_pinned = 1;
local_irq_save(flags);
double_rq_lock(this_rq, busiest);
ld_moved = move_tasks(this_rq, this_cpu, busiest,
diff --git a/kernel/signal.c b/kernel/signal.c
index 29e233fd7a0f..7165af5f1b11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2711,8 +2711,8 @@ out:
/**
* sys_rt_sigaction - alter an action taken by a process
* @sig: signal to be sent
- * @act: the thread group ID of the thread
- * @oact: the PID of the thread
+ * @act: new sigaction
+ * @oact: used to save the previous sigaction
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE4(rt_sigaction, int, sig,
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 25028dd4fa18..c340ca658f37 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -19,7 +19,6 @@
*/
#include <linux/device.h>
#include <linux/file.h>
-#include <linux/mutex.h>
#include <linux/posix-clock.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
{
struct posix_clock *clk = fp->private_data;
- mutex_lock(&clk->mutex);
+ down_read(&clk->rwsem);
if (!clk->zombie)
return clk;
- mutex_unlock(&clk->mutex);
+ up_read(&clk->rwsem);
return NULL;
}
static void put_posix_clock(struct posix_clock *clk)
{
- mutex_unlock(&clk->mutex);
+ up_read(&clk->rwsem);
}
static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
struct posix_clock *clk =
container_of(inode->i_cdev, struct posix_clock, cdev);
- mutex_lock(&clk->mutex);
+ down_read(&clk->rwsem);
if (clk->zombie) {
err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
fp->private_data = clk;
}
out:
- mutex_unlock(&clk->mutex);
+ up_read(&clk->rwsem);
return err;
}
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
int err;
kref_init(&clk->kref);
- mutex_init(&clk->mutex);
+ init_rwsem(&clk->rwsem);
cdev_init(&clk->cdev, &posix_clock_file_operations);
clk->cdev.owner = clk->ops.owner;
err = cdev_add(&clk->cdev, devid, 1);
- if (err)
- goto no_cdev;
return err;
-no_cdev:
- mutex_destroy(&clk->mutex);
- return err;
}
EXPORT_SYMBOL_GPL(posix_clock_register);
static void delete_clock(struct kref *kref)
{
struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
- mutex_destroy(&clk->mutex);
+
if (clk->release)
clk->release(clk);
}
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
{
cdev_del(&clk->cdev);
- mutex_lock(&clk->mutex);
+ down_write(&clk->rwsem);
clk->zombie = true;
- mutex_unlock(&clk->mutex);
+ up_write(&clk->rwsem);
kref_put(&clk->kref, delete_clock);
}
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7aa40f8e182d..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+ unsigned int depth, bool explicit)
{
struct blk_trace *bt = q->blk_trace;
if (bt) {
- unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
- __be64 rpdu = cpu_to_be64(pdu);
+ __be64 rpdu = cpu_to_be64(depth);
+ u32 what;
- __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
- sizeof(rpdu), &rpdu);
- }
-}
-
-static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
-{
- struct blk_trace *bt = q->blk_trace;
-
- if (bt) {
- unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
- __be64 rpdu = cpu_to_be64(pdu);
+ if (explicit)
+ what = BLK_TA_UNPLUG_IO;
+ else
+ what = BLK_TA_UNPLUG_TIMER;
- __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
- sizeof(rpdu), &rpdu);
+ __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
}
}
@@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void)
WARN_ON(ret);
ret = register_trace_block_plug(blk_add_trace_plug, NULL);
WARN_ON(ret);
- ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
- WARN_ON(ret);
- ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+ ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
WARN_ON(ret);
ret = register_trace_block_split(blk_add_trace_split, NULL);
WARN_ON(ret);
@@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
unregister_trace_block_split(blk_add_trace_split, NULL);
- unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
- unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
+ unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
unregister_trace_block_plug(blk_add_trace_plug, NULL);
unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 05672e819f8c..a235f3cc471c 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -49,12 +49,9 @@ static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res)
val = *s - '0';
else if ('a' <= _tolower(*s) && _tolower(*s) <= 'f')
val = _tolower(*s) - 'a' + 10;
- else if (*s == '\n') {
- if (*(s + 1) == '\0')
- break;
- else
- return -EINVAL;
- } else
+ else if (*s == '\n' && *(s + 1) == '\0')
+ break;
+ else
return -EINVAL;
if (val >= base)
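The simplified branch means the parsers accept exactly one trailing newline, which is what sysfs writes usually carry; for example (illustrative, and the reason the new single_flag_store() in mm/huge_memory.c below can feed the raw sysfs buffer to kstrtoul()):

	unsigned long value;
	int ret;

	ret = kstrtoul("1\n", 10, &value);	/* ret == 0, value == 1 */
	ret = kstrtoul("1\n\n", 10, &value);	/* ret == -EINVAL: only one trailing '\n' */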
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index 325c2f9ecebd..d55769d63cb8 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -315,12 +315,12 @@ static void __init test_kstrtou64_ok(void)
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
- {"2147483648", 10, 2147483648},
- {"2147483649", 10, 2147483649},
- {"4294967294", 10, 4294967294},
- {"4294967295", 10, 4294967295},
- {"4294967296", 10, 4294967296},
- {"4294967297", 10, 4294967297},
+ {"2147483648", 10, 2147483648ULL},
+ {"2147483649", 10, 2147483649ULL},
+ {"4294967294", 10, 4294967294ULL},
+ {"4294967295", 10, 4294967295ULL},
+ {"4294967296", 10, 4294967296ULL},
+ {"4294967297", 10, 4294967297ULL},
{"9223372036854775806", 10, 9223372036854775806ULL},
{"9223372036854775807", 10, 9223372036854775807ULL},
{"9223372036854775808", 10, 9223372036854775808ULL},
@@ -369,12 +369,12 @@ static void __init test_kstrtos64_ok(void)
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
- {"2147483648", 10, 2147483648},
- {"2147483649", 10, 2147483649},
- {"4294967294", 10, 4294967294},
- {"4294967295", 10, 4294967295},
- {"4294967296", 10, 4294967296},
- {"4294967297", 10, 4294967297},
+ {"2147483648", 10, 2147483648LL},
+ {"2147483649", 10, 2147483649LL},
+ {"4294967294", 10, 4294967294LL},
+ {"4294967295", 10, 4294967295LL},
+ {"4294967296", 10, 4294967296LL},
+ {"4294967297", 10, 4294967297LL},
{"9223372036854775806", 10, 9223372036854775806LL},
{"9223372036854775807", 10, 9223372036854775807LL},
};
@@ -418,10 +418,10 @@ static void __init test_kstrtou32_ok(void)
{"65537", 10, 65537},
{"2147483646", 10, 2147483646},
{"2147483647", 10, 2147483647},
- {"2147483648", 10, 2147483648},
- {"2147483649", 10, 2147483649},
- {"4294967294", 10, 4294967294},
- {"4294967295", 10, 4294967295},
+ {"2147483648", 10, 2147483648U},
+ {"2147483649", 10, 2147483649U},
+ {"4294967294", 10, 4294967294U},
+ {"4294967295", 10, 4294967295U},
};
TEST_OK(kstrtou32, u32, "%u", test_u32_ok);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0a619e0e2e0b..470dcda10add 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -244,24 +244,28 @@ static ssize_t single_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag)
{
- if (test_bit(flag, &transparent_hugepage_flags))
- return sprintf(buf, "[yes] no\n");
- else
- return sprintf(buf, "yes [no]\n");
+ return sprintf(buf, "%d\n",
+ !!test_bit(flag, &transparent_hugepage_flags));
}
+
static ssize_t single_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag)
{
- if (!memcmp("yes", buf,
- min(sizeof("yes")-1, count))) {
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ if (value)
set_bit(flag, &transparent_hugepage_flags);
- } else if (!memcmp("no", buf,
- min(sizeof("no")-1, count))) {
+ else
clear_bit(flag, &transparent_hugepage_flags);
- } else
- return -EINVAL;
return count;
}
@@ -680,8 +684,11 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
vma, haddr, numa_node_id(), 0);
- if (unlikely(!page))
+ if (unlikely(!page)) {
+ count_vm_event(THP_FAULT_FALLBACK);
goto out;
+ }
+ count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
put_page(page);
goto out;
@@ -909,11 +916,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
new_page = NULL;
if (unlikely(!new_page)) {
+ count_vm_event(THP_FAULT_FALLBACK);
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
put_page(page);
goto out;
}
+ count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
@@ -1390,6 +1399,7 @@ int split_huge_page(struct page *page)
BUG_ON(!PageSwapBacked(page));
__split_huge_page(page, anon_vma);
+ count_vm_event(THP_SPLIT);
BUG_ON(PageCompound(page));
out_unlock:
@@ -1784,9 +1794,11 @@ static void collapse_huge_page(struct mm_struct *mm,
node, __GFP_OTHER_NODE);
if (unlikely(!new_page)) {
up_read(&mm->mmap_sem);
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
return;
}
+ count_vm_event(THP_COLLAPSE_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
up_read(&mm->mmap_sem);
put_page(new_page);
@@ -2151,8 +2163,11 @@ static void khugepaged_do_scan(struct page **hpage)
#ifndef CONFIG_NUMA
if (!*hpage) {
*hpage = alloc_hugepage(khugepaged_defrag());
- if (unlikely(!*hpage))
+ if (unlikely(!*hpage)) {
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
break;
+ }
+ count_vm_event(THP_COLLAPSE_ALLOC);
}
#else
if (IS_ERR(*hpage))
@@ -2192,8 +2207,11 @@ static struct page *khugepaged_alloc_hugepage(void)
do {
hpage = alloc_hugepage(khugepaged_defrag());
- if (!hpage)
+ if (!hpage) {
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
khugepaged_alloc_sleep();
+ } else
+ count_vm_event(THP_COLLAPSE_ALLOC);
} while (unlikely(!hpage) &&
likely(khugepaged_enabled()));
return hpage;
@@ -2210,8 +2228,11 @@ static void khugepaged_loop(void)
while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
hpage = khugepaged_alloc_hugepage();
- if (unlikely(!hpage))
+ if (unlikely(!hpage)) {
+ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
break;
+ }
+ count_vm_event(THP_COLLAPSE_ALLOC);
#else
if (IS_ERR(hpage)) {
khugepaged_alloc_sleep();
diff --git a/mm/memory.c b/mm/memory.c
index 9da8cab1b1b0..ce22a250926f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1410,6 +1410,13 @@ no_page_table:
return page;
}
+static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_stack_continue(vma->vm_prev, addr);
+}
+
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
@@ -1488,7 +1495,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
unsigned long pg = start & PAGE_MASK;
- struct vm_area_struct *gate_vma = get_gate_vma(mm);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
@@ -1513,10 +1519,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
pte_unmap(pte);
return i ? : -EFAULT;
}
+ vma = get_gate_vma(mm);
if (pages) {
struct page *page;
- page = vm_normal_page(gate_vma, start, *pte);
+ page = vm_normal_page(vma, start, *pte);
if (!page) {
if (!(gup_flags & FOLL_DUMP) &&
is_zero_pfn(pte_pfn(*pte)))
@@ -1530,12 +1537,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
get_page(page);
}
pte_unmap(pte);
- if (vmas)
- vmas[i] = gate_vma;
- i++;
- start += PAGE_SIZE;
- nr_pages--;
- continue;
+ goto next_page;
}
if (!vma ||
@@ -1549,6 +1551,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
continue;
}
+ /*
+ * If we don't actually want the page itself,
+ * and it's the stack guard page, just skip it.
+ */
+ if (!pages && stack_guard_page(vma, start))
+ goto next_page;
+
do {
struct page *page;
unsigned int foll_flags = gup_flags;
@@ -1631,6 +1640,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
flush_anon_page(vma, page, start);
flush_dcache_page(page);
}
+next_page:
if (vmas)
vmas[i] = vma;
i++;
@@ -3678,7 +3688,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
*/
#ifdef CONFIG_HAVE_IOREMAP_PROT
vma = find_vma(mm, addr);
- if (!vma)
+ if (!vma || vma->vm_start > addr)
break;
if (vma->vm_ops && vma->vm_ops->access)
ret = vma->vm_ops->access(vma, addr, buf,
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a2acaf820fe5..9ca1d604f7cd 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -375,7 +375,7 @@ void online_page(struct page *page)
#endif
#ifdef CONFIG_FLATMEM
- max_mapnr = max(page_to_pfn(page), max_mapnr);
+ max_mapnr = max(pfn, max_mapnr);
#endif
ClearPageReserved(page);
diff --git a/mm/mlock.c b/mm/mlock.c
index 2689a08c79af..6b55e3efe0df 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -135,13 +135,6 @@ void munlock_vma_page(struct page *page)
}
}
-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
-{
- return (vma->vm_flags & VM_GROWSDOWN) &&
- (vma->vm_start == addr) &&
- !vma_stack_continue(vma->vm_prev, addr);
-}
-
/**
* __mlock_vma_pages_range() - mlock a range of pages in the vma.
* @vma: target vma
@@ -188,12 +181,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
if (vma->vm_flags & VM_LOCKED)
gup_flags |= FOLL_MLOCK;
- /* We don't try to access the guard page of a stack vma */
- if (stack_guard_page(vma, start)) {
- addr += PAGE_SIZE;
- nr_pages--;
- }
-
return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
NULL, NULL, nonblocking);
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 2ec8eb5a9cdd..e27e0cf0de03 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -259,7 +259,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
* randomize_va_space to 2, which will still cause mm->start_brk
* to be arbitrarily shifted
*/
- if (mm->start_brk > PAGE_ALIGN(mm->end_data))
+ if (current->brk_randomized)
min_brk = mm->start_brk;
else
min_brk = mm->end_data;
@@ -1814,11 +1814,14 @@ static int expand_downwards(struct vm_area_struct *vma,
size = vma->vm_end - address;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
- error = acct_stack_growth(vma, size, grow);
- if (!error) {
- vma->vm_start = address;
- vma->vm_pgoff -= grow;
- perf_event_mmap(vma);
+ error = -ENOMEM;
+ if (grow <= vma->vm_pgoff) {
+ error = acct_stack_growth(vma, size, grow);
+ if (!error) {
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ perf_event_mmap(vma);
+ }
}
}
vma_unlock_anon_vma(vma);
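The reworked expand_downwards() above only grows the mapping when the growth fits into vma->vm_pgoff. A small sketch of the failure mode the check prevents, with plain unsigned longs standing in for the kernel's fields: since the page offset is unsigned, subtracting more pages than it holds would silently wrap to a huge value instead of going negative.

/* Sketch of the vm_pgoff wrap guarded against above (illustrative values). */
#include <stdio.h>

int main(void)
{
	unsigned long vm_pgoff = 2;	/* mapping starts two pages into the object */
	unsigned long grow = 5;		/* pages the stack wants to grow downwards */

	if (grow <= vm_pgoff) {
		vm_pgoff -= grow;
		printf("grown, new vm_pgoff = %lu\n", vm_pgoff);
	} else {
		/* Without the check, vm_pgoff - grow would wrap around. */
		printf("refused: would wrap to %lu\n", vm_pgoff - grow);
	}
	return 0;
}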
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6a819d1b2c7d..83fb72c108b7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -84,24 +84,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
#endif /* CONFIG_NUMA */
/*
- * If this is a system OOM (not a memcg OOM) and the task selected to be
- * killed is not already running at high (RT) priorities, speed up the
- * recovery by boosting the dying task to the lowest FIFO priority.
- * That helps with the recovery and avoids interfering with RT tasks.
- */
-static void boost_dying_task_prio(struct task_struct *p,
- struct mem_cgroup *mem)
-{
- struct sched_param param = { .sched_priority = 1 };
-
- if (mem)
- return;
-
- if (!rt_task(p))
- sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-}
-
-/*
* The process p may have detached its own ->mm while exiting or through
* use_mm(), but one or more of its subthreads may still have a valid
* pointer. Return p, or any of its subthreads with a valid ->mm, with
@@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
set_tsk_thread_flag(p, TIF_MEMDIE);
force_sig(SIGKILL, p);
- /*
- * We give our sacrificial lamb high priority and access to
- * all the memory it needs. That way it should be able to
- * exit() and clear out its resources quickly...
- */
- boost_dying_task_prio(p, mem);
-
return 0;
}
#undef K
@@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
*/
if (p->flags & PF_EXITING) {
set_tsk_thread_flag(p, TIF_MEMDIE);
- boost_dying_task_prio(p, mem);
return 0;
}
@@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
- boost_dying_task_prio(current, NULL);
return;
}
@@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
*/
if (fatal_signal_pending(current)) {
set_thread_flag(TIF_MEMDIE);
- boost_dying_task_prio(current, NULL);
return;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2747f5e5abc1..9f8a97b9a350 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3176,7 +3176,7 @@ static __init_refok int __build_all_zonelists(void *data)
* Called with zonelists_mutex held always
* unless system_state == SYSTEM_BOOTING.
*/
-void build_all_zonelists(void *data)
+void __ref build_all_zonelists(void *data)
{
set_zonelist_order();
diff --git a/mm/shmem.c b/mm/shmem.c
index 58da7c150ba6..8fa27e4e582a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -421,7 +421,8 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
* a waste to allocate index if we cannot allocate data.
*/
if (sbinfo->max_blocks) {
- if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0)
+ if (percpu_counter_compare(&sbinfo->used_blocks,
+ sbinfo->max_blocks - 1) >= 0)
return ERR_PTR(-ENOSPC);
percpu_counter_inc(&sbinfo->used_blocks);
spin_lock(&inode->i_lock);
@@ -1397,7 +1398,8 @@ repeat:
shmem_swp_unmap(entry);
sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks) {
- if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) ||
+ if (percpu_counter_compare(&sbinfo->used_blocks,
+ sbinfo->max_blocks) >= 0 ||
shmem_acct_block(info->flags)) {
spin_unlock(&info->lock);
error = -ENOSPC;
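The shmem hunks above tighten the block-limit test from "strictly greater" to "greater or equal". The toy comparison below shows the off-by-one the old test allowed once the counter had already reached the limit; a plain long stands in for the kernel's per-CPU counter, and compare() mimics the sign convention of percpu_counter_compare() (<0, 0, >0 for counter below, at, or above the threshold).

#include <stdio.h>

static int compare(long counter, long rhs)
{
	return (counter > rhs) - (counter < rhs);
}

int main(void)
{
	long max_blocks = 8;
	long used_blocks = 8;		/* filesystem is exactly full */

	/* Old test: "> 0" still admits one more block when used == max. */
	printf("old check rejects: %d\n", compare(used_blocks, max_blocks) > 0);
	/* New test: ">= 0" rejects as soon as the limit is reached. */
	printf("new check rejects: %d\n", compare(used_blocks, max_blocks) >= 0);
	return 0;
}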
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7f5a6d4b75b..f6b435c80079 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,6 +41,7 @@
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
+#include <linux/oom.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -1988,17 +1989,12 @@ static bool zone_reclaimable(struct zone *zone)
return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
static bool all_unreclaimable(struct zonelist *zonelist,
struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
- bool all_unreclaimable = true;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2002,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
continue;
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone_reclaimable(zone)) {
- all_unreclaimable = false;
- break;
- }
+ if (!zone->all_unreclaimable)
+ return false;
}
- return all_unreclaimable;
+ return true;
}
/*
@@ -2108,6 +2102,14 @@ out:
if (sc->nr_reclaimed)
return sc->nr_reclaimed;
+ /*
+ * As hibernation is going on, kswapd is frozen so that it can't mark
+ * the zone all_unreclaimable. Thus we bypass the all_unreclaimable
+ * check here.
+ */
+ if (oom_killer_disabled)
+ return 0;
+
/* top priority shrink_zones still had more to do? don't OOM, then */
if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
return 1;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 772b39b87d95..897ea9e88238 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -321,9 +321,12 @@ static inline void mod_state(struct zone *zone,
/*
* The fetching of the stat_threshold is racy. We may apply
* a counter threshold to the wrong the cpu if we get
- * rescheduled while executing here. However, the following
- * will apply the threshold again and therefore bring the
- * counter under the threshold.
+ * rescheduled while executing here. However, the next
+ * counter update will apply the threshold again and
+ * therefore bring the counter under the threshold again.
+ *
+ * Most of the time the thresholds are the same anyway
+ * for all cpus in a zone.
*/
t = this_cpu_read(pcp->stat_threshold);
@@ -945,7 +948,16 @@ static const char * const vmstat_text[] = {
"unevictable_pgs_cleared",
"unevictable_pgs_stranded",
"unevictable_pgs_mlockfreed",
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ "thp_fault_alloc",
+ "thp_fault_fallback",
+ "thp_collapse_alloc",
+ "thp_collapse_alloc_failed",
+ "thp_split",
#endif
+
+#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
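The comment updated in the first vmstat.c hunk above describes the usual per-CPU counter scheme: each CPU batches small deltas and folds them into the shared counter only once they exceed a per-CPU threshold, so a stale threshold read merely delays the fold until the next update. A compressed single-CPU sketch of that scheme, with plain variables in place of per-CPU data:

#include <stdio.h>

#define THRESHOLD 8

static long global_count;
static long pcp_delta;		/* this CPU's private, unflushed delta */

static void mod_state(long delta)
{
	pcp_delta += delta;
	if (pcp_delta > THRESHOLD || pcp_delta < -THRESHOLD) {
		global_count += pcp_delta;	/* fold into the global counter */
		pcp_delta = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		mod_state(1);
	printf("global=%ld pending=%ld\n", global_count, pcp_delta);
	return 0;
}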
diff --git a/net/9p/client.c b/net/9p/client.c
index 48b8e084e710..77367745be9b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -929,15 +929,15 @@ error:
}
EXPORT_SYMBOL(p9_client_attach);
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
- int clone)
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+ char **wnames, int clone)
{
int err;
struct p9_client *clnt;
struct p9_fid *fid;
struct p9_qid *wqids;
struct p9_req_t *req;
- int16_t nwqids, count;
+ uint16_t nwqids, count;
err = 0;
wqids = NULL;
@@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
fid = oldfid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n",
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %u wname[0] %s\n",
oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
@@ -1220,27 +1220,6 @@ error:
}
EXPORT_SYMBOL(p9_client_fsync);
-int p9_client_sync_fs(struct p9_fid *fid)
-{
- int err = 0;
- struct p9_req_t *req;
- struct p9_client *clnt;
-
- P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
-
- clnt = fid->clnt;
- req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto error;
- }
- P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
- p9_free_req(clnt, req);
-error:
- return err;
-}
-EXPORT_SYMBOL(p9_client_sync_fs);
-
int p9_client_clunk(struct p9_fid *fid)
{
int err;
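p9_client_walk() above switches nwname/nwqids from int16_t to uint16_t, matching the unsigned 16-bit counts of the 9P wire format. The sketch below shows what goes wrong when such a count is read into a signed 16-bit variable:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t wire = 0x8003;		/* count as encoded by the peer */

	int16_t  as_signed   = (int16_t)wire;
	uint16_t as_unsigned = wire;

	printf("int16_t  : %d\n", as_signed);	/* negative, loops never run */
	printf("uint16_t : %u\n", as_unsigned);	/* the intended count */
	return 0;
}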
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 8a4084fa8b5a..b58a501cf3d1 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'T':{
- int16_t *nwname = va_arg(ap, int16_t *);
+ uint16_t *nwname = va_arg(ap, uint16_t *);
char ***wnames = va_arg(ap, char ***);
errcode = p9pdu_readf(pdu, proto_version,
@@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
case 'E':{
int32_t cnt = va_arg(ap, int32_t);
const char *k = va_arg(ap, const void *);
- const char *u = va_arg(ap, const void *);
+ const char __user *u = va_arg(ap,
+ const void __user *);
errcode = p9pdu_writef(pdu, proto_version, "d",
cnt);
if (!errcode && pdu_write_urw(pdu, k, u, cnt))
@@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'T':{
- int16_t nwname = va_arg(ap, int);
+ uint16_t nwname = va_arg(ap, int);
const char **wnames = va_arg(ap, const char **);
errcode = p9pdu_writef(pdu, proto_version, "w",
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index d47880e971dd..e883172f9aa2 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
uint32_t pdata_mapped_pages;
struct trans_rpage_info *rpinfo;
- *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);
+ *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
if (*pdata_off)
first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e8f046b07182..244e70742183 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -326,8 +326,11 @@ req_retry_pinned:
outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
pdata_off, rpinfo->rp_data, pdata_len);
} else {
- char *pbuf = req->tc->pubuf ? req->tc->pubuf :
- req->tc->pkbuf;
+ char *pbuf;
+ if (req->tc->pubuf)
+ pbuf = (__force char *) req->tc->pubuf;
+ else
+ pbuf = req->tc->pkbuf;
outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
req->tc->pbuf_size);
}
@@ -352,8 +355,12 @@ req_retry_pinned:
in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
pdata_off, rpinfo->rp_data, pdata_len);
} else {
- char *pbuf = req->tc->pubuf ? req->tc->pubuf :
- req->tc->pkbuf;
+ char *pbuf;
+ if (req->tc->pubuf)
+ pbuf = (__force char *) req->tc->pubuf;
+ else
+ pbuf = req->tc->pkbuf;
+
in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
pbuf, req->tc->pbuf_size);
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 008ff6c4eecf..f3bc322c5891 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
goto drop;
}
- /* Zero out the CB buffer if no options present */
- if (iph->ihl == 5) {
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (iph->ihl == 5)
return 0;
- }
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 27dab26ad3b8..054fdb5aeb88 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -13,6 +13,7 @@
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+
#define container_obj(layr) ((struct cfsrvl *) layr)
#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
+ u8 packet_type;
u32 zero = 0;
struct caif_payload_info *info;
struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > DGM_MTU)
return -EMSGSIZE;
- cfpkt_add_head(pkt, &zero, 4);
+ cfpkt_add_head(pkt, &zero, 3);
+ packet_type = 0x08; /* B9 set - UNCLASSIFIED */
+ cfpkt_add_head(pkt, &packet_type, 1);
/* Add info for MUX-layer to route the packet out. */
info = cfpkt_info(pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 46f34b2e0478..24f1ffa74b06 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
int phyid)
{
struct cfmuxl *muxl = container_obj(layr);
- struct list_head *node;
+ struct list_head *node, *next;
struct cflayer *layer;
- list_for_each(node, &muxl->srvl_list) {
+ list_for_each_safe(node, next, &muxl->srvl_list) {
layer = list_entry(node, struct cflayer, node);
if (cfsrvl_phyid_match(layer, phyid))
layer->ctrlcmd(layer, ctrl, phyid);
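The cfmuxl change above uses the "safe" list walk because layer->ctrlcmd() may unlink the entry it is invoked on. The sketch below reproduces the pattern with a tiny hand-rolled singly linked list rather than the kernel's <linux/list.h>: the iterator caches the next pointer before the callback runs, so freeing the current node is harmless.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* Callback that deletes every even node - analogous to a ctrlcmd handler
 * that removes the layer it is invoked on. */
static void visit(struct node **head, struct node *n)
{
	if (n->id % 2 == 0) {
		struct node **pp = head;
		while (*pp != n)
			pp = &(*pp)->next;
		*pp = n->next;
		free(n);
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head;
	for (int i = 1; i <= 5; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = NULL;
		*tail = n;
		tail = &n->next;
	}

	/* Safe walk: remember next before visiting, so visit() may free n. */
	for (struct node *n = head, *next; n; n = next) {
		next = n->next;
		visit(&head, n);
	}

	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->id);
	printf("\n");	/* prints: 1 3 5 */
	return 0;
}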
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 50af02737a3d..5a80f41c0cba 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -579,9 +579,15 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc,
list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
r_linger_osd) {
- __unregister_linger_request(osdc, req);
+ /*
+ * reregister request prior to unregistering linger so
+ * that r_osd is preserved.
+ */
+ BUG_ON(!list_empty(&req->r_req_lru_item));
__register_request(osdc, req);
- list_move(&req->r_req_lru_item, &osdc->req_unsent);
+ list_add(&req->r_req_lru_item, &osdc->req_unsent);
+ list_add(&req->r_osd_item, &req->r_osd->o_requests);
+ __unregister_linger_request(osdc, req);
dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid,
osd->o_osd);
}
@@ -798,7 +804,7 @@ static void __register_request(struct ceph_osd_client *osdc,
req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
INIT_LIST_HEAD(&req->r_req_lru_item);
- dout("register_request %p tid %lld\n", req, req->r_tid);
+ dout("__register_request %p tid %lld\n", req, req->r_tid);
__insert_request(osdc, req);
ceph_osdc_get_request(req);
osdc->num_requests++;
diff --git a/net/core/dev.c b/net/core/dev.c
index 956d3b006e8b..c2ac599fa0f6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
}
/* TSO requires that SG is present as well. */
- if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
- features &= ~NETIF_F_TSO;
+ if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
+ netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+ features &= ~NETIF_F_ALL_TSO;
}
+ /* TSO ECN requires that TSO is present as well. */
+ if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
+ features &= ~NETIF_F_TSO_ECN;
+
/* Software GSO depends on SG. */
if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
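netdev_fix_features() above enforces two feature dependencies: every TSO variant needs scatter-gather, and TSO_ECN is meaningless without a base TSO feature. A userspace sketch of the same normalization, using made-up flag values rather than the kernel's NETIF_F_* bits:

#include <stdio.h>

#define F_SG        0x01u
#define F_TSO       0x02u
#define F_TSO6      0x04u
#define F_TSO_ECN   0x08u
#define F_ALL_TSO   (F_TSO | F_TSO6 | F_TSO_ECN)

static unsigned fix_features(unsigned features)
{
	/* Every TSO variant needs scatter-gather. */
	if ((features & F_ALL_TSO) && !(features & F_SG))
		features &= ~F_ALL_TSO;

	/* TSO_ECN on its own is useless: it only modifies TSO/TSO6. */
	if ((features & F_ALL_TSO) == F_TSO_ECN)
		features &= ~F_TSO_ECN;

	return features;
}

int main(void)
{
	printf("%#x\n", fix_features(F_TSO | F_TSO_ECN));        /* no SG -> all TSO dropped */
	printf("%#x\n", fix_features(F_SG | F_TSO_ECN));          /* lone TSO_ECN dropped */
	printf("%#x\n", fix_features(F_SG | F_TSO | F_TSO_ECN));  /* kept as-is */
	return 0;
}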
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index d951f93644bf..3da418894efc 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -14,6 +14,13 @@
#include "dsa_priv.h"
#include "mv88e6xxx.h"
+/*
+ * Switch product IDs
+ */
+#define ID_6085 0x04a0
+#define ID_6095 0x0950
+#define ID_6131 0x1060
+
static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
{
int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
if (ret >= 0) {
ret &= 0xfff0;
- if (ret == 0x0950)
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
return "Marvell 88E6095/88E6095F";
- if (ret == 0x1060)
+ if (ret == ID_6131)
return "Marvell 88E6131";
}
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
int addr = REG_PORT(p);
u16 val;
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
* MAC Forcing register: don't force link, speed, duplex
* or flow control state to any particular values on physical
* ports, but force the CPU port and all DSA ports to 1000 Mb/s
- * full duplex.
+ * (100 Mb/s on 6085) full duplex.
*/
if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
- REG_WRITE(addr, 0x01, 0x003e);
+ if (ps->id == ID_6085)
+ REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
else
REG_WRITE(addr, 0x01, 0x0003);
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
mv88e6xxx_ppu_state_init(ds);
mutex_init(&ps->stats_mutex);
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
ret = mv88e6131_switch_reset(ds);
if (ret < 0)
return ret;
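The mv88e6131 hunks above read the switch product ID once at setup and use it to pick the forced port speed, since the 88E6085 tops out at 100 Mb/s. A sketch of that selection; the ID and register values mirror the constants added in the diff, everything else is simplified:

#include <stdio.h>
#include <stdint.h>

#define ID_6085 0x04a0
#define ID_6095 0x0950
#define ID_6131 0x1060

/* Value written to the MAC forcing register on CPU/DSA ports. */
static uint16_t cpu_port_mac_forcing(uint16_t product_id)
{
	if (product_id == ID_6085)
		return 0x003d;	/* force 100 Mb/s full duplex */
	return 0x003e;		/* force 1000 Mb/s full duplex */
}

int main(void)
{
	printf("6085: 0x%04x\n", cpu_port_mac_forcing(ID_6085));
	printf("6131: 0x%04x\n", cpu_port_mac_forcing(ID_6131));
	return 0;
}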
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h
index eb0e0aaa9f1b..61156ca26a0d 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/net/dsa/mv88e6xxx.h
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
* Hold this mutex over snapshot + dump sequences.
*/
struct mutex stats_mutex;
+
+ int id; /* switch product id */
};
struct mv88e6xxx_hw_stat {
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index ce2d33582859..5761185f884e 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
-
-ccflags-y += -Wall -DDEBUG
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6c0b7f4a3d7d..38f23e721b80 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
+ sk2->sk_state == TCP_LISTEN) {
const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
(tb->num_owners < smallest_size || smallest_size == -1)) {
smallest_size = tb->num_owners;
smallest_rover = rover;
- if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
- !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+ if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
spin_unlock(&head->lock);
snum = smallest_rover;
goto have_snum;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dd1b20eca1a2..9df4e635fb5f 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
}
/* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
int do_free;
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer __rcu **stack[PEER_MAXDEPTH];
struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(&p->daddr, stack, base) != p)
BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
}
/* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
struct inet_peer *p = NULL;
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
* happen because of entry limits in route cache. */
return -1;
- unlink_from_pool(p, peer_to_base(p));
+ unlink_from_pool(p, peer_to_base(p), stack);
return 0;
}
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
if (base->total >= inet_peer_threshold)
/* Remove one less-recently-used entry. */
- cleanup_once(0);
+ cleanup_once(0, stack);
return p;
}
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
{
unsigned long now = jiffies;
int ttl, total;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
total = compute_total();
if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
total / inet_peer_threshold * HZ;
- while (!cleanup_once(ttl)) {
+ while (!cleanup_once(ttl, stack)) {
if (jiffies != now)
break;
}
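The inetpeer change above moves the PEER_MAXDEPTH lookup array out of unlink_from_pool() and threads a single instance down from the outermost callers, so the same call chain does not keep several copies of the large array on the kernel stack at once. A minimal sketch of the caller-provided scratch buffer pattern; sizes and names are illustrative only:

#include <stdio.h>
#include <string.h>

#define MAXDEPTH 40

static int cleanup_once(unsigned long ttl, void *stack[MAXDEPTH])
{
	(void)ttl;
	/* pretend to use the scratch area for a tree walk */
	memset(stack, 0, MAXDEPTH * sizeof(stack[0]));
	return -1;	/* nothing left to clean in this toy version */
}

static void check_expire(void)
{
	void *stack[MAXDEPTH];	/* declared once, reused by every iteration */

	while (!cleanup_once(0, stack))
		;
	printf("expire pass done\n");
}

int main(void)
{
	check_expire();
	return 0;
}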
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 28a736f3442f..2391b24e8251 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
pp_ptr = optptr + 2;
goto error;
}
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
goto error;
}
opt->ts = optptr - iph;
- if (skb) {
+ if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
unsigned long orefdst;
int err;
- if (!opt->srr)
+ if (!opt->srr || !rt)
return 0;
if (skb->pkt_type != PACKET_HOST)
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f3c0b549b8e1..4614babdc45f 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
return csum;
}
-static int nf_ip_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict __always_unused)
{
- struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4);
+ struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
if (IS_ERR(rt))
return PTR_ERR(rt);
*dst = &rt->dst;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ea107515c53e..c1acf69858fd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
+ rth->rt_route_iif = dev->ifindex;
rth->rt_iif = dev->ifindex;
rth->dst.dev = init_net.loopback_dev;
dev_hold(rth->dst.dev);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_key_src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
+ rth->rt_route_iif = in_dev->dev->ifindex;
rth->rt_iif = in_dev->dev->ifindex;
rth->dst.dev = (out_dev)->dev;
dev_hold(rth->dst.dev);
@@ -2202,6 +2204,7 @@ local_input:
#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
+ rth->rt_route_iif = dev->ifindex;
rth->rt_iif = dev->ifindex;
rth->dst.dev = net->loopback_dev;
dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_mark = oldflp4->flowi4_mark;
rth->rt_dst = fl4->daddr;
rth->rt_src = fl4->saddr;
- rth->rt_iif = 0;
+ rth->rt_route_iif = 0;
+ rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
/* get references to the devices that are to be hold by the routing
cache entry */
rth->dst.dev = dev_out;
@@ -2716,6 +2720,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_key_dst = ort->rt_key_dst;
rt->rt_key_src = ort->rt_key_src;
rt->rt_tos = ort->rt_tos;
+ rt->rt_route_iif = ort->rt_route_iif;
rt->rt_iif = ort->rt_iif;
rt->rt_oif = ort->rt_oif;
rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2730,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_type = ort->rt_type;
rt->rt_dst = ort->rt_dst;
rt->rt_src = ort->rt_src;
- rt->rt_iif = ort->rt_iif;
rt->rt_gateway = ort->rt_gateway;
rt->rt_spec_dst = ort->rt_spec_dst;
rt->peer = ort->peer;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1a456652086b..321e6e84dbcc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
-#ifdef CONFIG_IP_MULTICAST
{
.procname = "igmp_max_memberships",
.data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
-
-#endif
{
.procname = "igmp_max_msf",
.data = &sysctl_igmp_max_msf,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 13e0e7f659ff..d20a05e970d8 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
rt->rt_key_dst = fl4->daddr;
rt->rt_key_src = fl4->saddr;
rt->rt_tos = fl4->flowi4_tos;
+ rt->rt_route_iif = fl4->flowi4_iif;
rt->rt_iif = fl4->flowi4_iif;
rt->rt_oif = fl4->flowi4_oif;
rt->rt_mark = fl4->flowi4_mark;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 166054650466..f2c5b0fc0f21 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
(!sk->sk_reuse || !sk2->sk_reuse ||
- ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+ sk2->sk_state == TCP_LISTEN) &&
ipv6_rcv_saddr_equal(sk, sk2))
break;
}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39aaca2b4fd2..28bc1f644b7b 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -90,9 +90,18 @@ static int nf_ip6_reroute(struct sk_buff *skb,
return 0;
}
-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict)
{
- *dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+ static const struct ipv6_pinfo fake_pinfo;
+ static const struct inet_sock fake_sk = {
+ /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+ .sk.sk_bound_dev_if = 1,
+ .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+ };
+ const void *sk = strict ? &fake_sk : NULL;
+
+ *dst = ip6_route_output(net, sk, &fl->u.ip6);
return (*dst)->error;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 56fa12538d45..4f49e5dd41bb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1622,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1649,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
__kfree_skb(opt_skb);
return 0;
}
- }
+ } else
+ sock_rps_save_rxhash(sk, skb->rxhash);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7037c006e13..15c37746845e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
int rc;
int is_udplite = IS_UDPLITE(sk);
+ if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+ sock_rps_save_rxhash(sk, skb->rxhash);
+
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c9890e25cd4c..cc616974a447 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
MSG_NOSIGNAL)) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
lock_sock(sk);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 058f1e9a9128..903242111317 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
s32 data_size = ntohs(pdulen) - llc_len;
if (data_size < 0 ||
- ((skb_tail_pointer(skb) -
- (u8 *)pdu) - llc_len) < data_size)
+ !pskb_may_pull(skb, data_size))
return 0;
if (unlikely(pskb_trim_rcsum(skb, data_size)))
return 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9d192d665ff5..c5d4530d8284 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2541,7 +2541,6 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
* same TID from the same station
*/
rx->skb = skb;
- rx->flags = 0;
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
@@ -2612,6 +2611,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
.sdata = sta->sdata,
.local = sta->local,
.queue = tid,
+ .flags = 0,
};
struct tid_ampdu_rx *tid_agg_rx;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3f988aa1152..32bff6d86cb2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -652,7 +652,6 @@ comment "Xtables matches"
config NETFILTER_XT_MATCH_ADDRTYPE
tristate '"addrtype" address type match support'
depends on NETFILTER_ADVANCED
- depends on (IPV6 || IPV6=n)
---help---
This option allows you to match what routing thinks of an address,
eg. UNICAST, LOCAL, BROADCAST, ...
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index bca96990218d..a113ff066928 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
if (map->netmask != 32)
NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 5e790172deff..a274300b6a56 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
ipset_adtfn adtfn = set->variant->adt[adt];
struct ipmac data;
+ /* MAC can be src only */
+ if (!(flags & IPSET_DIM_TWO_SRC))
+ return 0;
+
data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
if (data.id < map->first_ip || data.id > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
@@ -434,8 +438,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
goto nla_put_failure;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map)
+ (map->last_ip - map->first_ip + 1) * map->dsize));
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 165f09b1a9cb..6b38eb8f6ed8 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
goto nla_put_failure;
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->memsize));
if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 253326e8d990..72d1ac611fdc 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -26,6 +26,7 @@
static LIST_HEAD(ip_set_type_list); /* all registered set types */
static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
static struct ip_set **ip_set_list; /* all individual sets */
static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
static inline void
__ip_set_get(ip_set_id_t index)
{
- atomic_inc(&ip_set_list[index]->ref);
+ write_lock_bh(&ip_set_ref_lock);
+ ip_set_list[index]->ref++;
+ write_unlock_bh(&ip_set_ref_lock);
}
static inline void
__ip_set_put(ip_set_id_t index)
{
- atomic_dec(&ip_set_list[index]->ref);
+ write_lock_bh(&ip_set_ref_lock);
+ BUG_ON(ip_set_list[index]->ref == 0);
+ ip_set_list[index]->ref--;
+ write_unlock_bh(&ip_set_ref_lock);
}
/*
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
struct ip_set *set = ip_set_list[index];
int ret = 0;
- BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ BUG_ON(set == NULL);
pr_debug("set %s, index %u\n", set->name, index);
if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
struct ip_set *set = ip_set_list[index];
int ret;
- BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ BUG_ON(set == NULL);
pr_debug("set %s, index %u\n", set->name, index);
if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
struct ip_set *set = ip_set_list[index];
int ret = 0;
- BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ BUG_ON(set == NULL);
pr_debug("set %s, index %u\n", set->name, index);
if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
* Find set by name, reference it once. The reference makes sure the
* thing pointed to, does not go away under our feet.
*
- * The nfnl mutex must already be activated.
*/
ip_set_id_t
ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
* reference count by 1. The caller shall not assume the index
* to be valid, after calling this function.
*
- * The nfnl mutex must already be activated.
*/
void
ip_set_put_byindex(ip_set_id_t index)
{
- if (ip_set_list[index] != NULL) {
- BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ if (ip_set_list[index] != NULL)
__ip_set_put(index);
- }
}
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
* can't be destroyed. The set cannot be renamed due to
* the referencing either.
*
- * The nfnl mutex must already be activated.
*/
const char *
ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
const struct ip_set *set = ip_set_list[index];
BUG_ON(set == NULL);
- BUG_ON(atomic_read(&set->ref) == 0);
+ BUG_ON(set->ref == 0);
/* Referenced, so it's safe */
return set->name;
@@ -515,10 +516,7 @@ void
ip_set_nfnl_put(ip_set_id_t index)
{
nfnl_lock();
- if (ip_set_list[index] != NULL) {
- BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
- __ip_set_put(index);
- }
+ ip_set_put_byindex(index);
nfnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
/*
* Communication protocol with userspace over netlink.
*
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
*/
static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
return -ENOMEM;
rwlock_init(&set->lock);
strlcpy(set->name, name, IPSET_MAXNAMELEN);
- atomic_set(&set->ref, 0);
set->family = family;
/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
/*
* Here, we have a valid, constructed set and we are protected
- * by nfnl_lock. Find the first free index in ip_set_list and
- * check clashing.
+ * by the nfnl mutex. Find the first free index in ip_set_list
+ * and check clashing.
*/
if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
const struct nlattr * const attr[])
{
ip_set_id_t i;
+ int ret = 0;
if (unlikely(protocol_failed(attr)))
return -IPSET_ERR_PROTOCOL;
- /* References are protected by the nfnl mutex */
+ /* Commands are serialized and references are
+ * protected by the ip_set_ref_lock.
+ * External systems (i.e. xt_set) must call
+ * ip_set_put|get_nfnl_* functions; that way we
+ * can safely check references here.
+ *
+ * list:set timer can only decrement the reference
+ * counter, so if it's already zero, we can proceed
+ * without holding the lock.
+ */
+ read_lock_bh(&ip_set_ref_lock);
if (!attr[IPSET_ATTR_SETNAME]) {
for (i = 0; i < ip_set_max; i++) {
- if (ip_set_list[i] != NULL &&
- (atomic_read(&ip_set_list[i]->ref)))
- return -IPSET_ERR_BUSY;
+ if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+ ret = IPSET_ERR_BUSY;
+ goto out;
+ }
}
+ read_unlock_bh(&ip_set_ref_lock);
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL)
ip_set_destroy_set(i);
}
} else {
i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
- if (i == IPSET_INVALID_ID)
- return -ENOENT;
- else if (atomic_read(&ip_set_list[i]->ref))
- return -IPSET_ERR_BUSY;
+ if (i == IPSET_INVALID_ID) {
+ ret = -ENOENT;
+ goto out;
+ } else if (ip_set_list[i]->ref) {
+ ret = -IPSET_ERR_BUSY;
+ goto out;
+ }
+ read_unlock_bh(&ip_set_ref_lock);
ip_set_destroy_set(i);
}
return 0;
+out:
+ read_unlock_bh(&ip_set_ref_lock);
+ return ret;
}
/* Flush sets */
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
struct ip_set *set;
const char *name2;
ip_set_id_t i;
+ int ret = 0;
if (unlikely(protocol_failed(attr) ||
attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
if (set == NULL)
return -ENOENT;
- if (atomic_read(&set->ref) != 0)
- return -IPSET_ERR_REFERENCED;
+
+ read_lock_bh(&ip_set_ref_lock);
+ if (set->ref != 0) {
+ ret = -IPSET_ERR_REFERENCED;
+ goto out;
+ }
name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
for (i = 0; i < ip_set_max; i++) {
if (ip_set_list[i] != NULL &&
- STREQ(ip_set_list[i]->name, name2))
- return -IPSET_ERR_EXIST_SETNAME2;
+ STREQ(ip_set_list[i]->name, name2)) {
+ ret = -IPSET_ERR_EXIST_SETNAME2;
+ goto out;
+ }
}
strncpy(set->name, name2, IPSET_MAXNAMELEN);
- return 0;
+out:
+ read_unlock_bh(&ip_set_ref_lock);
+ return ret;
}
/* Swap two sets so that name/index points to the other.
* References and set names are also swapped.
*
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
* do not hold the mutex but the pointer settings are atomic
* so the ip_set_list always contains valid pointers to the sets.
*/
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
struct ip_set *from, *to;
ip_set_id_t from_id, to_id;
char from_name[IPSET_MAXNAMELEN];
- u32 from_ref;
if (unlikely(protocol_failed(attr) ||
attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -899,17 +924,15 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
from->type->family == to->type->family))
return -IPSET_ERR_TYPE_MISMATCH;
- /* No magic here: ref munging protected by the nfnl_lock */
strncpy(from_name, from->name, IPSET_MAXNAMELEN);
- from_ref = atomic_read(&from->ref);
-
strncpy(from->name, to->name, IPSET_MAXNAMELEN);
- atomic_set(&from->ref, atomic_read(&to->ref));
strncpy(to->name, from_name, IPSET_MAXNAMELEN);
- atomic_set(&to->ref, from_ref);
+ write_lock_bh(&ip_set_ref_lock);
+ swap(from->ref, to->ref);
ip_set_list[from_id] = to;
ip_set_list[to_id] = from;
+ write_unlock_bh(&ip_set_ref_lock);
return 0;
}
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
{
if (cb->args[2]) {
pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
- __ip_set_put((ip_set_id_t) cb->args[1]);
+ ip_set_put_byindex((ip_set_id_t) cb->args[1]);
}
return 0;
}
@@ -999,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->args[1] >= ip_set_max)
goto out;
- pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+ pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
for (; cb->args[1] < max; cb->args[1]++) {
index = (ip_set_id_t) cb->args[1];
set = ip_set_list[index];
@@ -1015,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
* so that lists (unions of sets) are dumped last.
*/
if (cb->args[0] != DUMP_ONE &&
- !((cb->args[0] == DUMP_ALL) ^
- (set->type->features & IPSET_DUMP_LAST)))
+ ((cb->args[0] == DUMP_ALL) ==
+ !!(set->type->features & IPSET_DUMP_LAST)))
continue;
pr_debug("List set: %s\n", set->name);
if (!cb->args[2]) {
@@ -1060,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
goto release_refcount;
}
}
+ /* If we dump all sets, continue with dumping last ones */
+ if (cb->args[0] == DUMP_ALL) {
+ cb->args[0] = DUMP_LAST;
+ cb->args[1] = 0;
+ goto dump_last;
+ }
goto out;
nla_put_failure:
@@ -1068,13 +1098,8 @@ release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[2]) {
pr_debug("release set %s\n", ip_set_list[index]->name);
- __ip_set_put(index);
+ ip_set_put_byindex(index);
}
-
- /* If we dump all sets, continue with dumping last ones */
- if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
- cb->args[0] = DUMP_LAST;
-
out:
if (nlh) {
nlmsg_end(skb, nlh);
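The ip_set_core changes above replace the atomic_t reference count with a plain integer protected by ip_set_ref_lock, so the busy checks in destroy/rename are made under the same lock that every get/put takes. The pthread sketch below models only the counting; it does not reproduce the nfnl-mutex command serialization the kernel additionally relies on.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
static int ref;

static void set_get(void)
{
	pthread_rwlock_wrlock(&ref_lock);
	ref++;
	pthread_rwlock_unlock(&ref_lock);
}

static void set_put(void)
{
	pthread_rwlock_wrlock(&ref_lock);
	ref--;
	pthread_rwlock_unlock(&ref_lock);
}

/* Destroy is only allowed while no references exist; the read lock keeps
 * concurrent set_get()/set_put() callers out during the check. */
static int try_destroy(void)
{
	int busy;

	pthread_rwlock_rdlock(&ref_lock);
	busy = ref != 0;
	pthread_rwlock_unlock(&ref_lock);
	return busy ? -1 : 0;
}

int main(void)
{
	set_get();
	printf("destroy while referenced: %d\n", try_destroy());
	set_put();
	printf("destroy after release:    %d\n", try_destroy());
	return 0;
}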
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a47c32982f06..e9159e99fc4b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -43,14 +43,19 @@ struct list_set {
static inline struct set_elem *
list_set_elem(const struct list_set *map, u32 id)
{
- return (struct set_elem *)((char *)map->members + id * map->dsize);
+ return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+ return (struct set_telem *)((void *)map->members + id * map->dsize);
}
static inline bool
list_set_timeout(const struct list_set *map, u32 id)
{
- const struct set_telem *elem =
- (const struct set_telem *) list_set_elem(map, id);
+ const struct set_telem *elem = list_set_telem(map, id);
return ip_set_timeout_test(elem->timeout);
}
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
static inline bool
list_set_expired(const struct list_set *map, u32 id)
{
- const struct set_telem *elem =
- (const struct set_telem *) list_set_elem(map, id);
+ const struct set_telem *elem = list_set_telem(map, id);
return ip_set_timeout_expired(elem->timeout);
}
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
- return elem->id != IPSET_INVALID_ID &&
- !ip_set_timeout_expired(elem->timeout);
-}
-
/* Set list without and with timeout */
static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
struct set_telem *e;
for (; i < map->size; i++) {
- e = (struct set_telem *)list_set_elem(map, i);
+ e = list_set_telem(map, i);
swap(e->id, id);
+ swap(e->timeout, timeout);
if (e->id == IPSET_INVALID_ID)
break;
- swap(e->timeout, timeout);
}
}
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
/* Last element replaced: e.g. add new,before,last */
ip_set_put_byindex(e->id);
if (with_timeout(map->timeout))
- list_elem_tadd(map, i, id, timeout);
+ list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
else
list_elem_add(map, i, id);
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
}
static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
{
struct set_elem *a = list_set_elem(map, i), *b;
- ip_set_put_byindex(id);
+ ip_set_put_byindex(a->id);
for (; i < map->size - 1; i++) {
b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
(before == 0 ||
(before > 0 &&
next_id_eq(map, i, refid))))
- ret = list_set_del(map, id, i);
+ ret = list_set_del(map, i);
else if (before < 0 &&
elem->id == refid &&
next_id_eq(map, i, id))
- ret = list_set_del(map, id, i + 1);
+ ret = list_set_del(map, i + 1);
}
break;
default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
if (with_timeout(map->timeout))
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
- NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
- htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
htonl(sizeof(*map) + map->size * map->dsize));
ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
struct set_telem *e;
u32 i;
- /* We run parallel with other readers (test element)
- * but adding/deleting new entries is locked out */
- read_lock_bh(&set->lock);
- for (i = map->size - 1; i >= 0; i--) {
- e = (struct set_telem *) list_set_elem(map, i);
- if (e->id != IPSET_INVALID_ID &&
- list_set_expired(map, i))
- list_set_del(map, e->id, i);
+ write_lock_bh(&set->lock);
+ for (i = 0; i < map->size; i++) {
+ e = list_set_telem(map, i);
+ if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+ list_set_del(map, i);
}
- read_unlock_bh(&set->lock);
+ write_unlock_bh(&set->lock);
map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
add_timer(&map->gc);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 33733c8872e7..ae47090bf45f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3120,7 +3120,7 @@ nla_put_failure:
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct net *net = skb_net(skb);
+ struct net *net = skb_sknet(skb);
struct netns_ipvs *ipvs = net_ipvs(net);
mutex_lock(&__ip_vs_mutex);
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 867882313e49..bcd5ed6b7130 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
CHECK_BOUND(bs, 2);
count = *bs->cur++;
count <<= 8;
- count = *bs->cur++;
+ count += *bs->cur++;
break;
case SEMI:
BYTE_ALIGN(bs);
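The one-character ASN.1 fix above repairs a two-byte big-endian length decode: the second byte must be added into the shifted high byte, not assigned over it. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	const unsigned char buf[] = { 0x01, 0x2c };	/* 300 on the wire */
	const unsigned char *cur;
	unsigned int count;

	/* Buggy version: the second assignment discards the high byte. */
	cur = buf;
	count = *cur++;
	count <<= 8;
	count = *cur++;
	printf("buggy : %u\n", count);	/* 44 */

	/* Fixed version, as in the hunk above. */
	cur = buf;
	count = *cur++;
	count <<= 8;
	count += *cur++;
	printf("fixed : %u\n", count);	/* 300 */
	return 0;
}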
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 533a183e6661..18b2ce5c8ced 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
memset(&fl2, 0, sizeof(fl2));
fl2.daddr = dst->ip;
- if (!afinfo->route((struct dst_entry **)&rt1,
- flowi4_to_flowi(&fl1))) {
- if (!afinfo->route((struct dst_entry **)&rt2,
- flowi4_to_flowi(&fl2))) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+ flowi4_to_flowi(&fl1), false)) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+ flowi4_to_flowi(&fl2), false)) {
if (rt1->rt_gateway == rt2->rt_gateway &&
rt1->dst.dev == rt2->dst.dev)
ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
memset(&fl2, 0, sizeof(fl2));
ipv6_addr_copy(&fl2.daddr, &dst->in6);
- if (!afinfo->route((struct dst_entry **)&rt1,
- flowi6_to_flowi(&fl1))) {
- if (!afinfo->route((struct dst_entry **)&rt2,
- flowi6_to_flowi(&fl2))) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+ flowi6_to_flowi(&fl1), false)) {
+ if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+ flowi6_to_flowi(&fl2), false)) {
if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
sizeof(rt1->rt6i_gateway)) &&
rt1->dst.dev == rt2->dst.dev)
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 6e6b46cb1db9..9e63b43faeed 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
rcu_read_lock();
ai = nf_get_afinfo(family);
if (ai != NULL)
- ai->route((struct dst_entry **)&rt, &fl);
+ ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
rcu_read_unlock();
if (rt != NULL) {
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 2220b85e9519..b77d383cec78 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
MODULE_ALIAS("ip6t_addrtype");
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+ const struct in6_addr *addr)
{
+ const struct nf_afinfo *afinfo;
+ struct flowi6 flow;
+ struct rt6_info *rt;
u32 ret;
+ int route_err;
- if (!rt)
+ memset(&flow, 0, sizeof(flow));
+ ipv6_addr_copy(&flow.daddr, addr);
+ if (dev)
+ flow.flowi6_oif = dev->ifindex;
+
+ rcu_read_lock();
+
+ afinfo = nf_get_afinfo(NFPROTO_IPV6);
+ if (afinfo != NULL)
+ route_err = afinfo->route(net, (struct dst_entry **)&rt,
+ flowi6_to_flowi(&flow), !!dev);
+ else
+ route_err = 1;
+
+ rcu_read_unlock();
+
+ if (route_err)
return XT_ADDRTYPE_UNREACHABLE;
if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
ret |= XT_ADDRTYPE_LOCAL;
if (rt->rt6i_flags & RTF_ANYCAST)
ret |= XT_ADDRTYPE_ANYCAST;
+
+
+ dst_release(&rt->dst);
return ret;
}
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
return false;
if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
- XT_ADDRTYPE_UNREACHABLE) & mask) {
- struct rt6_info *rt;
- u32 type;
- int ifindex = dev ? dev->ifindex : 0;
-
- rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
- type = xt_addrtype_rt6_to_type(rt);
-
- dst_release(&rt->dst);
- return !!(mask & type);
- }
+ XT_ADDRTYPE_UNREACHABLE) & mask)
+ return !!(mask & match_lookup_rt6(net, dev, addr));
return true;
}
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 2c0086a4751e..481a86fdc409 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
return info->match_flags & XT_CONNTRACK_STATE;
if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
(CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
- !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+ !(info->invert_flags & XT_CONNTRACK_DIRECTION))
return false;
if (info->match_flags & XT_CONNTRACK_ORIGSRC)
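The xt_conntrack fix above changes the polarity of the invert operand in an XOR-based direction test. The sketch below illustrates the general idiom with a generic boolean property and invert bit (it is an illustration of the pattern, not of the iptables semantics themselves): a match-with-invert is the XOR of the property and the invert flag, so negating the wrong operand flips every outcome.

#include <stdio.h>
#include <stdbool.h>

static bool matches(bool property, bool invert)
{
	/* true when the property holds, unless the invert bit flips it */
	return property ^ invert;
}

int main(void)
{
	for (int property = 0; property <= 1; property++)
		for (int invert = 0; invert <= 1; invert++)
			printf("property=%d invert=%d -> match=%d\n",
			       property, invert, matches(property, invert));
	return 0;
}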
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 061d48cec137..b3babaed7719 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
+ ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
if (info->match_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: set match dimension "
"is over the limit!\n");
+ ip_set_nfnl_put(info->match_set.index);
return -ERANGE;
}
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
if (info->del_set.index != IPSET_INVALID_ID)
ip_set_del(info->del_set.index,
skb, par->family,
- info->add_set.dim,
+ info->del_set.dim,
info->del_set.flags);
return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
if (index == IPSET_INVALID_ID) {
pr_warning("Cannot find del_set index %u as target\n",
info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
return -ENOENT;
}
}
if (info->add_set.dim > IPSET_DIM_MAX ||
- info->del_set.flags > IPSET_DIM_MAX) {
+ info->del_set.dim > IPSET_DIM_MAX) {
pr_warning("Protocol error: SET target dimension "
"is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
return -ERANGE;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0698cad61763..1a21c571aa03 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_assoc_set_primary(asoc, transport);
if (asoc->peer.active_path == peer)
asoc->peer.active_path = transport;
+ if (asoc->peer.retran_path == peer)
+ asoc->peer.retran_path = transport;
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
if (t)
asoc->peer.retran_path = t;
+ else
+ t = asoc->peer.retran_path;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 9022f0a6503e..0a9a2ec2e469 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -427,7 +427,7 @@ static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
struct crypto_hash *hmac;
- static const char sigkeyconstant[] = "signaturekey";
+ char sigkeyconstant[] = "signaturekey";
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
struct hash_desc desc;
struct scatterlist sg[1];
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 659326c3e895..006ad817cd5f 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -332,7 +332,7 @@ static int conf_choice(struct menu *menu)
}
if (!child)
continue;
- if (line[strlen(line) - 1] == '?') {
+ if (line[0] && line[strlen(line) - 1] == '?') {
print_help(child);
continue;
}
diff --git a/security/capability.c b/security/capability.c
index 2984ea4f776f..bbb51156261b 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -181,7 +181,7 @@ static int cap_inode_follow_link(struct dentry *dentry,
return 0;
}
-static int cap_inode_permission(struct inode *inode, int mask)
+static int cap_inode_permission(struct inode *inode, int mask, unsigned flags)
{
return 0;
}
diff --git a/security/security.c b/security/security.c
index 101142369db4..4ba6d4cc061f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -518,16 +518,14 @@ int security_inode_permission(struct inode *inode, int mask)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
- return security_ops->inode_permission(inode, mask);
+ return security_ops->inode_permission(inode, mask, 0);
}
int security_inode_exec_permission(struct inode *inode, unsigned int flags)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
- if (flags)
- return -ECHILD;
- return security_ops->inode_permission(inode, MAY_EXEC);
+ return security_ops->inode_permission(inode, MAY_EXEC, flags);
}
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 9da6420e2056..1d027e29ce8d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -471,6 +471,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
* @avd: access vector decisions
* @result: result from avc_has_perm_noaudit
* @a: auxiliary audit data
+ * @flags: VFS walk flags
*
* Audit the granting or denial of permissions in accordance
* with the policy. This function is typically called by
@@ -481,9 +482,10 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
* be performed under a lock, to allow the lock to be released
* before calling the auditing code.
*/
-void avc_audit(u32 ssid, u32 tsid,
+int avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
- struct av_decision *avd, int result, struct common_audit_data *a)
+ struct av_decision *avd, int result, struct common_audit_data *a,
+ unsigned flags)
{
struct common_audit_data stack_data;
u32 denied, audited;
@@ -515,11 +517,24 @@ void avc_audit(u32 ssid, u32 tsid,
else
audited = requested & avd->auditallow;
if (!audited)
- return;
+ return 0;
+
if (!a) {
a = &stack_data;
COMMON_AUDIT_DATA_INIT(a, NONE);
}
+
+ /*
+ * When in an RCU walk, do the audit on the RCU retry. This is because
+ * the collection of the dname in an inode audit message is not RCU
+ * safe. Note this may drop some audits when the situation changes
+ * during retry. However this is logically just as if the operation
+ * happened a little later.
+ */
+ if ((a->type == LSM_AUDIT_DATA_FS) &&
+ (flags & IPERM_FLAG_RCU))
+ return -ECHILD;
+
a->selinux_audit_data.tclass = tclass;
a->selinux_audit_data.requested = requested;
a->selinux_audit_data.ssid = ssid;
@@ -529,6 +544,7 @@ void avc_audit(u32 ssid, u32 tsid,
a->lsm_pre_audit = avc_audit_pre_callback;
a->lsm_post_audit = avc_audit_post_callback;
common_lsm_audit(a);
+ return 0;
}
/**
@@ -793,6 +809,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
* @auditdata: auxiliary audit data
+ * @flags: VFS walk flags
*
* Check the AVC to determine whether the @requested permissions are granted
* for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -802,14 +819,19 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
* permissions are granted, -%EACCES if any permissions are denied, or
* another -errno upon other errors.
*/
-int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct common_audit_data *auditdata)
+int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, struct common_audit_data *auditdata,
+ unsigned flags)
{
struct av_decision avd;
- int rc;
+ int rc, rc2;
rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
- avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
+
+ rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
+ flags);
+ if (rc2)
+ return rc2;
return rc;
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f9c3764e4859..f7cf0ea6faea 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1446,8 +1446,11 @@ static int task_has_capability(struct task_struct *tsk,
}
rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
- if (audit == SECURITY_CAP_AUDIT)
- avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
+ if (audit == SECURITY_CAP_AUDIT) {
+ int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
+ if (rc2)
+ return rc2;
+ }
return rc;
}
@@ -1467,7 +1470,8 @@ static int task_has_system(struct task_struct *tsk,
static int inode_has_perm(const struct cred *cred,
struct inode *inode,
u32 perms,
- struct common_audit_data *adp)
+ struct common_audit_data *adp,
+ unsigned flags)
{
struct inode_security_struct *isec;
struct common_audit_data ad;
@@ -1487,7 +1491,7 @@ static int inode_has_perm(const struct cred *cred,
ad.u.fs.inode = inode;
}
- return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
+ return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
}
/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1504,7 +1508,7 @@ static inline int dentry_has_perm(const struct cred *cred,
COMMON_AUDIT_DATA_INIT(&ad, FS);
ad.u.fs.path.mnt = mnt;
ad.u.fs.path.dentry = dentry;
- return inode_has_perm(cred, inode, av, &ad);
+ return inode_has_perm(cred, inode, av, &ad, 0);
}
/* Check whether a task can use an open file descriptor to
@@ -1540,7 +1544,7 @@ static int file_has_perm(const struct cred *cred,
/* av is zero if only checking access to the descriptor. */
rc = 0;
if (av)
- rc = inode_has_perm(cred, inode, av, &ad);
+ rc = inode_has_perm(cred, inode, av, &ad, 0);
out:
return rc;
@@ -2103,7 +2107,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
file = file_priv->file;
inode = file->f_path.dentry->d_inode;
if (inode_has_perm(cred, inode,
- FILE__READ | FILE__WRITE, NULL)) {
+ FILE__READ | FILE__WRITE, NULL, 0)) {
drop_tty = 1;
}
}
@@ -2635,7 +2639,7 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *na
return dentry_has_perm(cred, NULL, dentry, FILE__READ);
}
-static int selinux_inode_permission(struct inode *inode, int mask)
+static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags)
{
const struct cred *cred = current_cred();
struct common_audit_data ad;
@@ -2657,7 +2661,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
perms = file_mask_to_av(inode->i_mode, mask);
- return inode_has_perm(cred, inode, perms, &ad);
+ return inode_has_perm(cred, inode, perms, &ad, flags);
}
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
@@ -3205,7 +3209,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
* new inode label or new policy.
* This check is not redundant - do not remove.
*/
- return inode_has_perm(cred, inode, open_file_to_av(file), NULL);
+ return inode_has_perm(cred, inode, open_file_to_av(file), NULL, 0);
}
/* task security operations */
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 5615081b73ec..e77b2ac2908b 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -54,11 +54,11 @@ struct avc_cache_stats {
void __init avc_init(void);
-void avc_audit(u32 ssid, u32 tsid,
+int avc_audit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
struct av_decision *avd,
int result,
- struct common_audit_data *a);
+ struct common_audit_data *a, unsigned flags);
#define AVC_STRICT 1 /* Ignore permissive mode. */
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
@@ -66,9 +66,17 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
unsigned flags,
struct av_decision *avd);
-int avc_has_perm(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct common_audit_data *auditdata);
+int avc_has_perm_flags(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct common_audit_data *auditdata,
+ unsigned);
+
+static inline int avc_has_perm(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct common_audit_data *auditdata)
+{
+ return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
+}
u32 avc_policy_seqno(void);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index c6f8fcadae07..400a5d5cde61 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -686,7 +686,7 @@ static int smack_inode_rename(struct inode *old_inode,
*
* Returns 0 if access is permitted, -EACCES otherwise
*/
-static int smack_inode_permission(struct inode *inode, int mask)
+static int smack_inode_permission(struct inode *inode, int mask, unsigned flags)
{
struct smk_audit_info ad;
@@ -696,6 +696,10 @@ static int smack_inode_permission(struct inode *inode, int mask)
*/
if (mask == 0)
return 0;
+
+ /* May be droppable after audit */
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
smk_ad_setfield_u_fs_inode(&ad, inode);
return smk_curacc(smk_of_inode(inode), mask, &ad);
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 8808b82311b1..76e0d5695075 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -140,6 +140,9 @@ int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
if (!prtd || !prtd->params)
return 0;
+ if (prtd->dma_ch == -1)
+ return -EINVAL;
+
DCSR(prtd->dma_ch) &= ~DCSR_RUN;
DCSR(prtd->dma_ch) = 0;
DCMD(prtd->dma_ch) = 0;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 430f41db6044..759ade12e758 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -937,6 +937,7 @@ void snd_hda_shutup_pins(struct hda_codec *codec)
}
EXPORT_SYMBOL_HDA(snd_hda_shutup_pins);
+#ifdef SND_HDA_NEEDS_RESUME
/* Restore the pin controls cleared previously via snd_hda_shutup_pins() */
static void restore_shutup_pins(struct hda_codec *codec)
{
@@ -953,6 +954,7 @@ static void restore_shutup_pins(struct hda_codec *codec)
}
codec->pins_shutup = 0;
}
+#endif
static void init_hda_cache(struct hda_cache_rec *cache,
unsigned int record_size);
@@ -1329,6 +1331,7 @@ static void purify_inactive_streams(struct hda_codec *codec)
}
}
+#ifdef SND_HDA_NEEDS_RESUME
/* clean up all streams; called from suspend */
static void hda_cleanup_all_streams(struct hda_codec *codec)
{
@@ -1340,6 +1343,7 @@ static void hda_cleanup_all_streams(struct hda_codec *codec)
really_cleanup_stream(codec, p);
}
}
+#endif
/*
* amp access functions
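Both helpers wrapped in SND_HDA_NEEDS_RESUME above are only called from suspend/resume code that is itself compiled out when that symbol is undefined, so guarding them with the same #ifdef avoids "defined but not used" warnings in those configurations. The pattern in miniature:

    #include <stdio.h>

    #define NEEDS_RESUME                    /* stand-in for SND_HDA_NEEDS_RESUME */

    #ifdef NEEDS_RESUME
    static void restore_state(void) { puts("restored"); }   /* used only by resume code */
    #endif

    int main(void)
    {
    #ifdef NEEDS_RESUME
            restore_state();                /* caller and helper share the same guard */
    #endif
            return 0;
    }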
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 69e33869a53e..ad97d937d3a8 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3035,6 +3035,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
+ SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
{}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 251773e45f61..715615a88a8d 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1280,6 +1280,39 @@ static int simple_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
stream_tag, format, substream);
}
+static void nvhdmi_8ch_7x_set_info_frame_parameters(struct hda_codec *codec,
+ int channels)
+{
+ unsigned int chanmask;
+ int chan = channels ? (channels - 1) : 1;
+
+ switch (channels) {
+ default:
+ case 0:
+ case 2:
+ chanmask = 0x00;
+ break;
+ case 4:
+ chanmask = 0x08;
+ break;
+ case 6:
+ chanmask = 0x0b;
+ break;
+ case 8:
+ chanmask = 0x13;
+ break;
+ }
+
+ /* Set the audio infoframe channel allocation and checksum fields. The
+ * channel count is computed implicitly by the hardware. */
+ snd_hda_codec_write(codec, 0x1, 0,
+ Nv_VERB_SET_Channel_Allocation, chanmask);
+
+ snd_hda_codec_write(codec, 0x1, 0,
+ Nv_VERB_SET_Info_Frame_Checksum,
+ (0x71 - chan - chanmask));
+}
+
static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
@@ -1298,6 +1331,10 @@ static int nvhdmi_8ch_7x_pcm_close(struct hda_pcm_stream *hinfo,
AC_VERB_SET_STREAM_FORMAT, 0);
}
+ /* The audio hardware sends a channel count of 0x7 (8ch) when all the
+ * streams are disabled. */
+ nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8);
+
return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}
@@ -1308,37 +1345,16 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
{
int chs;
- unsigned int dataDCC1, dataDCC2, chan, chanmask, channel_id;
+ unsigned int dataDCC1, dataDCC2, channel_id;
int i;
mutex_lock(&codec->spdif_mutex);
chs = substream->runtime->channels;
- chan = chs ? (chs - 1) : 1;
- switch (chs) {
- default:
- case 0:
- case 2:
- chanmask = 0x00;
- break;
- case 4:
- chanmask = 0x08;
- break;
- case 6:
- chanmask = 0x0b;
- break;
- case 8:
- chanmask = 0x13;
- break;
- }
dataDCC1 = AC_DIG1_ENABLE | AC_DIG1_COPYRIGHT;
dataDCC2 = 0x2;
- /* set the Audio InforFrame Channel Allocation */
- snd_hda_codec_write(codec, 0x1, 0,
- Nv_VERB_SET_Channel_Allocation, chanmask);
-
/* turn off SPDIF once; otherwise the IEC958 bits won't be updated */
if (codec->spdif_status_reset && (codec->spdif_ctls & AC_DIG1_ENABLE))
snd_hda_codec_write(codec,
@@ -1413,10 +1429,7 @@ static int nvhdmi_8ch_7x_pcm_prepare(struct hda_pcm_stream *hinfo,
}
}
- /* set the Audio Info Frame Checksum */
- snd_hda_codec_write(codec, 0x1, 0,
- Nv_VERB_SET_Info_Frame_Checksum,
- (0x71 - chan - chanmask));
+ nvhdmi_8ch_7x_set_info_frame_parameters(codec, chs);
mutex_unlock(&codec->spdif_mutex);
return 0;
@@ -1512,6 +1525,11 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec)
spec->multiout.max_channels = 8;
spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x;
codec->patch_ops = nvhdmi_patch_ops_8ch_7x;
+
+ /* Initialize the audio infoframe channel mask and checksum to something
+ * valid */
+ nvhdmi_8ch_7x_set_info_frame_parameters(codec, 8);
+
return 0;
}
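The new helper recomputes both the channel allocation and the infoframe checksum, where the checksum is 0x71 - chan - chanmask and chan is channels - 1. Working the 8-channel case used at close and probe time through the constants above:

    channels = 8   ->  chan = 0x07, chanmask = 0x13
    checksum = 0x71 - 0x07 - 0x13 = 0x57

so every call site now gets a consistent allocation/checksum pair instead of the open-coded copy the prepare path used to carry.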
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7e28a64884f6..d3bd2c10180f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -14124,7 +14124,7 @@ static hda_nid_t alc269vb_capsrc_nids[1] = {
};
static hda_nid_t alc269_adc_candidates[] = {
- 0x08, 0x09, 0x07,
+ 0x08, 0x09, 0x07, 0x11,
};
#define alc269_modes alc260_modes
@@ -14868,6 +14868,23 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
alc_write_coef_idx(codec, 0x1e, coef | 0x80);
}
+static void alc271_fixup_dmic(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+{
+ static struct hda_verb verbs[] = {
+ {0x20, AC_VERB_SET_COEF_INDEX, 0x0d},
+ {0x20, AC_VERB_SET_PROC_COEF, 0x4000},
+ {}
+ };
+ unsigned int cfg;
+
+ if (strcmp(codec->chip_name, "ALC271X"))
+ return;
+ cfg = snd_hda_codec_get_pincfg(codec, 0x12);
+ if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
+ snd_hda_sequence_write(codec, verbs);
+}
+
enum {
ALC269_FIXUP_SONY_VAIO,
ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -14876,6 +14893,7 @@ enum {
ALC269_FIXUP_ASUS_G73JW,
ALC269_FIXUP_LENOVO_EAPD,
ALC275_FIXUP_SONY_HWEQ,
+ ALC271_FIXUP_DMIC,
};
static const struct alc_fixup alc269_fixups[] = {
@@ -14929,7 +14947,11 @@ static const struct alc_fixup alc269_fixups[] = {
.v.func = alc269_fixup_hweq,
.chained = true,
.chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2
- }
+ },
+ [ALC271_FIXUP_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc271_fixup_dmic,
+ },
};
static struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -14938,6 +14960,7 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 1395991c39f2..94d19c03a7f4 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -3408,6 +3408,9 @@ static int get_connection_index(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t conn[HDA_MAX_NUM_INPUTS];
int i, nums;
+ if (!(get_wcaps(codec, mux) & AC_WCAP_CONN_LIST))
+ return -1;
+
nums = snd_hda_get_connections(codec, mux, conn, ARRAY_SIZE(conn));
for (i = 0; i < nums; i++)
if (conn[i] == nid)
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index f7cd346fd727..f5ccdbf7ebc6 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -308,8 +308,6 @@ static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
ARRAY_SIZE(jz4740_codec_dapm_routes));
- snd_soc_dapm_new_widgets(codec);
-
jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index a54d2a5b28f6..4d9fb279e146 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -927,7 +927,7 @@ static struct platform_driver sn95031_codec_driver = {
.owner = THIS_MODULE,
},
.probe = sn95031_device_probe,
- .remove = sn95031_device_remove,
+ .remove = __devexit_p(sn95031_device_remove),
};
static int __init sn95031_init(void)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index ae1cadfae84c..f52b623bb692 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -247,8 +247,6 @@ static int wm8903_volatile_register(struct snd_soc_codec *codec, unsigned int re
case WM8903_REVISION_NUMBER:
case WM8903_INTERRUPT_STATUS_1:
case WM8903_WRITE_SEQUENCER_4:
- case WM8903_POWER_MANAGEMENT_3:
- case WM8903_POWER_MANAGEMENT_2:
case WM8903_DC_SERVO_READBACK_1:
case WM8903_DC_SERVO_READBACK_2:
case WM8903_DC_SERVO_READBACK_3:
@@ -875,34 +873,40 @@ SND_SOC_DAPM_MIXER("Left Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 1, 0,
SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0,
right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
-SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
- 4, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
+SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
+ 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
0, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
+SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 1, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
+SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 0, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("HPL_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 7, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("HPL_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 6, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA", 1, WM8903_ANALOGUE_HP_0, 4, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("HPR_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 3, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("HPR_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 2, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA", 1, WM8903_ANALOGUE_HP_0, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTL_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 7, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 6, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTR_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 3, 0,
NULL, 0),
SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 2, 0,
NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
NULL, 0),
SND_SOC_DAPM_SUPPLY("DCS Master", WM8903_DC_SERVO_0, 4, 0, NULL, 0),
@@ -1037,10 +1041,14 @@ static const struct snd_soc_dapm_route intercon[] = {
{ "Left Speaker PGA", NULL, "Left Speaker Mixer" },
{ "Right Speaker PGA", NULL, "Right Speaker Mixer" },
- { "HPL_ENA_DLY", NULL, "Left Headphone Output PGA" },
- { "HPR_ENA_DLY", NULL, "Right Headphone Output PGA" },
- { "LINEOUTL_ENA_DLY", NULL, "Left Line Output PGA" },
- { "LINEOUTR_ENA_DLY", NULL, "Right Line Output PGA" },
+ { "HPL_ENA", NULL, "Left Headphone Output PGA" },
+ { "HPR_ENA", NULL, "Right Headphone Output PGA" },
+ { "HPL_ENA_DLY", NULL, "HPL_ENA" },
+ { "HPR_ENA_DLY", NULL, "HPR_ENA" },
+ { "LINEOUTL_ENA", NULL, "Left Line Output PGA" },
+ { "LINEOUTR_ENA", NULL, "Right Line Output PGA" },
+ { "LINEOUTL_ENA_DLY", NULL, "LINEOUTL_ENA" },
+ { "LINEOUTR_ENA_DLY", NULL, "LINEOUTR_ENA" },
{ "HPL_DCS", NULL, "DCS Master" },
{ "HPR_DCS", NULL, "DCS Master" },
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 3290333b2bb9..84e1bd1d2822 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3261,20 +3261,36 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
/* Latch volume updates (right only; we always do left then right). */
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
+ WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
+ snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
+ WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
+ snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
+ WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
+ snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
+ WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
+ snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
+ WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
+ snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
+ WM8994_AIF2ADC_VU, WM8994_AIF2ADC_VU);
snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
+ snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
+ WM8994_DAC1_VU, WM8994_DAC1_VU);
snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
WM8994_DAC1_VU, WM8994_DAC1_VU);
+ snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
+ WM8994_DAC2_VU, WM8994_DAC2_VU);
snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
WM8994_DAC2_VU, WM8994_DAC2_VU);
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 7b6b3c18e299..4005e9af5d61 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -740,12 +740,12 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
{ "SPKL", "Input Switch", "MIXINL" },
{ "SPKL", "IN1LP Switch", "IN1LP" },
- { "SPKL", "Output Switch", "Left Output Mixer" },
+ { "SPKL", "Output Switch", "Left Output PGA" },
{ "SPKL", NULL, "TOCLK" },
{ "SPKR", "Input Switch", "MIXINR" },
{ "SPKR", "IN1RP Switch", "IN1RP" },
- { "SPKR", "Output Switch", "Right Output Mixer" },
+ { "SPKR", "Output Switch", "Right Output PGA" },
{ "SPKR", NULL, "TOCLK" },
{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
@@ -767,8 +767,8 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
{ "SPKOUTRP", NULL, "SPKR Driver" },
{ "SPKOUTRN", NULL, "SPKR Driver" },
- { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
- { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+ { "Left Headphone Mux", "Mixer", "Left Output PGA" },
+ { "Right Headphone Mux", "Mixer", "Right Output PGA" },
{ "Headphone PGA", NULL, "Left Headphone Mux" },
{ "Headphone PGA", NULL, "Right Headphone Mux" },
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c
index b2e9198a983a..d567c322a2fb 100644
--- a/sound/soc/mid-x86/sst_platform.c
+++ b/sound/soc/mid-x86/sst_platform.c
@@ -116,18 +116,20 @@ struct snd_soc_dai_driver sst_platform_dai[] = {
static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
int state)
{
- spin_lock(&stream->status_lock);
+ unsigned long flags;
+ spin_lock_irqsave(&stream->status_lock, flags);
stream->stream_status = state;
- spin_unlock(&stream->status_lock);
+ spin_unlock_irqrestore(&stream->status_lock, flags);
}
static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
{
int state;
+ unsigned long flags;
- spin_lock(&stream->status_lock);
+ spin_lock_irqsave(&stream->status_lock, flags);
state = stream->stream_status;
- spin_unlock(&stream->status_lock);
+ spin_unlock_irqrestore(&stream->status_lock, flags);
return state;
}
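The switch to spin_lock_irqsave() is the standard fix when a lock can also be taken from interrupt context: with a plain spin_lock(), an interrupt arriving on the same CPU while the status lock is held and then trying to take it again would spin forever. A hedged sketch of the interrupt-side user that makes the irqsave variant necessary (the handler, its data flow and the STREAM_RUNNING constant are hypothetical, not taken from this driver):

    static irqreturn_t stream_irq_sketch(int irq, void *data)
    {
            struct sst_runtime_stream *stream = data;

            spin_lock(&stream->status_lock);        /* already in IRQ context, no irqsave needed */
            if (stream->stream_status == STREAM_RUNNING)
                    handle_period_elapsed(stream);  /* hypothetical */
            spin_unlock(&stream->status_lock);
            return IRQ_HANDLED;
    }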
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 02fb66416ddc..2ce0b2d891d5 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -65,6 +65,7 @@ static int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
if (prtd->dma_ch >= 0) {
pxa_free_dma(prtd->dma_ch);
prtd->dma_ch = -1;
+ prtd->params = NULL;
}
return 0;
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index ac577263b3e3..b6445757fc54 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -167,7 +167,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
.cpu_dai_name = "pxa2xx-ac97",
- .codec_name = "wm9713-hifi",
+ .codec_dai_name = "wm9713-hifi",
.init = zylonite_wm9713_init,
},
{
@@ -176,7 +176,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
.cpu_dai_name = "pxa2xx-ac97-aux",
- .codec_name = "wm9713-aux",
+ .codec_dai_name = "wm9713-aux",
},
{
.name = "WM9713 Voice",
@@ -184,7 +184,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
.codec_name = "wm9713-codec",
.platform_name = "pxa-pcm-audio",
.cpu_dai_name = "pxa-ssp-dai.2",
- .codec_name = "wm9713-voice",
+ .codec_dai_name = "wm9713-voice",
.ops = &zylonite_voice_ops,
},
};
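The zylonite fix is easy to misread because C silently accepts duplicate designated initializers: when .codec_name appears twice in one struct initializer, the later value overrides the earlier one (GCC only warns with -Woverride-init), and .codec_dai_name was never being set at all. The mistake in miniature:

    #include <stdio.h>

    struct dai_link {
            const char *codec_name;
            const char *codec_dai_name;
    };

    int main(void)
    {
            struct dai_link buggy = {
                    .codec_name     = "wm9713-codec",
                    .codec_name     = "wm9713-hifi",   /* meant .codec_dai_name */
            };

            printf("codec_name=%s codec_dai_name=%s\n",
                   buggy.codec_name,
                   buggy.codec_dai_name ? buggy.codec_dai_name : "(null)");
            return 0;
    }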
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 38aac7d57a59..9c7e8b48aed6 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -350,8 +350,8 @@ static int s3c_pcm_set_fmt(struct snd_soc_dai *cpu_dai,
ctl = readl(regs + S3C_PCM_CTL);
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- /* Nothing to do, NB_NF by default */
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Nothing to do, IB_NF by default */
break;
default:
dev_err(pcm->dev, "Unsupported clock inversion!\n");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 0c9997e2d8c0..23c0e83d4c19 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1200,10 +1200,11 @@ static int fsi_probe(struct platform_device *pdev)
master->fsib.master = master;
pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
dev_set_drvdata(&pdev->dev, master);
+ pm_runtime_get_sync(&pdev->dev);
fsi_soft_all_reset(master);
+ pm_runtime_put_sync(&pdev->dev);
ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED,
id_entry->name, master);
@@ -1218,8 +1219,17 @@ static int fsi_probe(struct platform_device *pdev)
goto exit_free_irq;
}
- return snd_soc_register_dais(&pdev->dev, fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
+ ret = snd_soc_register_dais(&pdev->dev, fsi_soc_dai,
+ ARRAY_SIZE(fsi_soc_dai));
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot register snd dai\n");
+ goto exit_snd_soc;
+ }
+
+ return ret;
+exit_snd_soc:
+ snd_soc_unregister_platform(&pdev->dev);
exit_free_irq:
free_irq(irq, master);
exit_iounmap:
@@ -1238,12 +1248,11 @@ static int fsi_remove(struct platform_device *pdev)
master = dev_get_drvdata(&pdev->dev);
- snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
- snd_soc_unregister_platform(&pdev->dev);
-
+ free_irq(master->irq, master);
pm_runtime_disable(&pdev->dev);
- free_irq(master->irq, master);
+ snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
+ snd_soc_unregister_platform(&pdev->dev);
iounmap(master->base);
kfree(master);
@@ -1321,3 +1330,4 @@ module_exit(fsi_mobile_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_ALIAS("platform:fsi-pcm-audio");
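The reworked fsi_probe() error path follows the usual goto-unwind convention: each new label undoes only the most recent successful step and then falls through to the older labels, so teardown always runs in exact reverse order of setup. The shape of the pattern, with illustrative names rather than the driver's own:

    #include <errno.h>

    static int  step_a(void) { return 0; }
    static int  step_b(void) { return 0; }
    static int  step_c(void) { return -EINVAL; }    /* pretend the last step fails */
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int probe_sketch(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    return ret;
            ret = step_b();
            if (ret)
                    goto err_a;
            ret = step_c();
            if (ret)
                    goto err_b;                     /* undo b, then fall through to undo a */
            return 0;

    err_b:
            undo_b();
    err_a:
            undo_a();
            return ret;
    }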
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4dda58926bc5..d8562ce4de7a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -92,8 +92,8 @@ static int min_bytes_needed(unsigned long val)
static int format_register_str(struct snd_soc_codec *codec,
unsigned int reg, char *buf, size_t len)
{
- int wordsize = codec->driver->reg_word_size * 2;
- int regsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
+ int wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
+ int regsize = codec->driver->reg_word_size * 2;
int ret;
char tmpbuf[len + 1];
char regbuf[regsize + 1];
@@ -132,8 +132,8 @@ static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf,
size_t total = 0;
loff_t p = 0;
- wordsize = codec->driver->reg_word_size * 2;
- regsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
+ wordsize = min_bytes_needed(codec->driver->reg_cache_size) * 2;
+ regsize = codec->driver->reg_word_size * 2;
len = wordsize + regsize + 2 + 1;
@@ -629,6 +629,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
runtime->hw.rates |= codec_dai_drv->capture.rates;
}
+ ret = -EINVAL;
snd_pcm_limit_hw_rates(runtime);
if (!runtime->hw.rates) {
printk(KERN_ERR "asoc: %s <-> %s No matching rates\n",
@@ -640,7 +641,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
codec_dai->name, cpu_dai->name);
goto config_err;
}
- if (!runtime->hw.channels_min || !runtime->hw.channels_max) {
+ if (!runtime->hw.channels_min || !runtime->hw.channels_max ||
+ runtime->hw.channels_min > runtime->hw.channels_max) {
printk(KERN_ERR "asoc: %s <-> %s No matching channels\n",
codec_dai->name, cpu_dai->name);
goto config_err;
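The extra channels_min > channels_max test catches DAIs whose channel ranges are both non-zero but simply do not overlap; after intersecting the two capabilities the computed minimum can exceed the maximum. A hypothetical pairing the old check would have let through:

    cpu dai:    channels 4..8
    codec dai:  channels 1..2
    intersection: min = max(4, 1) = 4,  max = min(8, 2) = 2   ->  min > max, no usable configuration

With the new test (and the ret = -EINVAL default added above) the open fails cleanly instead of exposing an impossible hw range to userspace.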
@@ -2060,6 +2062,7 @@ const struct dev_pm_ops snd_soc_pm_ops = {
.resume = snd_soc_resume,
.poweroff = snd_soc_poweroff,
};
+EXPORT_SYMBOL_GPL(snd_soc_pm_ops);
/* ASoC platform driver */
static struct platform_driver soc_driver = {
diff --git a/sound/soc/tegra/harmony.c b/sound/soc/tegra/harmony.c
index 8585957477eb..556a57133925 100644
--- a/sound/soc/tegra/harmony.c
+++ b/sound/soc/tegra/harmony.c
@@ -370,6 +370,7 @@ static struct platform_driver tegra_snd_harmony_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
},
.probe = tegra_snd_harmony_probe,
.remove = __devexit_p(tegra_snd_harmony_remove),
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index b4b39c0b6c9e..f9289102886a 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1301,6 +1301,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi* umidi,
case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
case USB_ID(0x15ca, 0x1806): /* Textech USB Midi Cable */
case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */
+ case USB_ID(0xfc08, 0x0101): /* Unknown vendor Cable */
ep->max_transfer = 4;
break;
/*